/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
#define GPINT_RETRY_NUM 20

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

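/*
 * Block until DMUB has consumed all pending inbox messages. Each poll uses a
 * 100 ms timeout; with the disable_timeout debug option set, the wait is
 * retried indefinitely. A timeout dumps the DMCUB diagnostic state.
 */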
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	do {
		status = dmub_srv_wait_for_pending(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}

	return status == DMUB_STATUS_OK;
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

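/*
 * Register-based inbox path: each command in the list is waited on and then
 * executed synchronously through the DMCUB command registers, one command at
 * a time. The frame-buffer (ring buffer) path below is used when the
 * register interface is not selected.
 */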
static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status = DMUB_STATUS_OK;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		/* confirm no messages pending */
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		/* queue command */
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);

		/* check for errors */
		if (status != DMUB_STATUS_OK)
			break;
	}

	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

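/*
 * Frame-buffer (ring buffer) inbox path: commands are queued into inbox1 and
 * kicked off in a batch. A multi_cmd_pending sequence must fit into the ring
 * buffer contiguously, so when there is not enough free space the pending
 * queue is executed and drained before the command is requeued.
 */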
static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
				dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		} else {
			status = DMUB_STATUS_QUEUE_FULL;
		}

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_fb_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_fb_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

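/*
 * Dispatch a command list over whichever inbox transport the DMUB service
 * was initialized with, then refresh the cached inbox status so callers can
 * reliably wait for completion afterwards.
 */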
bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	bool res = false;

	if (dc_dmub_srv && dc_dmub_srv->dmub) {
		if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
			res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		} else {
			res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		}

		if (res)
			res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
	}

	return res;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
		enum dm_dmub_wait_type wait_type,
		union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			if (!dmub->debug.timeout_info.timeout_occured) {
				dmub->debug.timeout_info.timeout_occured = true;
				if (cmd_list)
					dmub->debug.timeout_info.timeout_cmd = *cmd_list;
				dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
			}
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
			dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
		}
	}

	return true;
}

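/*
 * Convenience wrappers: queue-execute plus wait in one call. Typical usage
 * mirrors dc_dmub_srv_drr_update_cmd() below -- zero a union dmub_rb_cmd,
 * fill in header.type/sub_type and the payload, set header.payload_bytes,
 * then run it with the desired wait type.
 */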
bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
		return false;

	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

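/*
 * Despite the plural name, this returns the index of the last pipe in the
 * current state whose timing generator drives the given stream (each match
 * overwrites the previous one).
 */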
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}

	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;
	struct dc_stream_status *stream_status = NULL;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign vactive stretch margin in this case.
			 */
			stream_status = dc_state_get_stream_status(context, pipe->stream);
			if (stream_status && !stream_status->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		stream_status = dc_state_get_stream_status(context, pipe->stream);
		if (stream_status && stream_status->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
			dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
		return;

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
		cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
			&cmd.visual_confirm_color.visual_confirm_color_data,
			sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 * @context: [in] DC state for access to phantom stream
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

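/*
 * Worked example for the vtotal math above (illustrative numbers only,
 * ignoring SUBVP_DRR_MARGIN_US for brevity): for a DRR timing of 1125x2200
 * total with pix_clk_100hz = 1485000 (148.5 MHz),
 * drr_frame_us = 1125 * 2200 * 1000000 / 148500000 ~= 16667 us. With an
 * assumed mall_region_us of ~500 us, min_drr_supported_us ~= 17167 us, so
 * min_vtotal_supported = 148500000 * 17167 / (2200 * 1000000) ~= 1158 lines,
 * i.e. the DRR frame must stretch beyond nominal to fit the MALL region.
 */
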
/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for SubVP + VBLANK case is stored in
 * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe
 * is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
							vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP pipe has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));
	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
			main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
			phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true enables the pipes population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For SubVP pipe count, only count the top most (ODM / MPC) pipe */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_is_pipe_type(pipe, DPP_PIPE) &&
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2) {
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
		}
		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	uint32_t i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		DC_LOG_DEBUG("    pc[%d]             : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
	DC_LOG_DEBUG("    outbox1_rptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
	DC_LOG_DEBUG("    outbox1_wptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
	DC_LOG_DEBUG("    outbox1_size       : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
	DC_LOG_DEBUG("    is_enabled         : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}

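/*
 * Cursor updates are routed through DMUB only when the firmware needs to know
 * about them: PSR-SU or PSR1 panels on DCN3.1+, or panels with Panel Replay
 * support. Video plane addresses and pipes that can disable the cursor are
 * excluded.
 */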
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
			resource_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
		pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information and passes it into
 * dmub
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
					&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for dmub command, the 2nd command is
	 * only assigned to store cursor attributes info.
	 * 1st command can be viewed as 2 parts, 1st is for PSR/Replay data, the
	 * other is to store cursor position info.
	 *
	 * Command header type must be the same type if using multi_cmd_pending.
	 * Besides, while processing the 2nd command in DMU, the sub type is
	 * useless. So it's meaningless to pass a sub type header with a
	 * different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Send both update_cursor_info commands to the DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;

	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

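/*
 * Count streams that are actively driving a display. A stream with DPMS off
 * still counts when disable_ips_in_dpms_off is set, so that IPS stays
 * blocked even with all displays blanked.
 */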
static int count_active_streams(const struct dc *dc)
{
	int i, count = 0;

	for (i = 0; i < dc->current_state->stream_count; ++i) {
		struct dc_stream_state *stream = dc->current_state->streams[i];

		if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
			count += 1;
	}

	return count;
}

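/*
 * Tell DMCUB whether the driver is going idle. On entry to idle this also
 * publishes the per-config IPS/RCG allow signals through the shared-state
 * region so firmware knows which low power states it may commit to. No wait
 * is performed on the allow path, since DMCUB could enter idle immediately
 * after the notification.
 */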
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;
	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (dc->work_arounds.skip_psr_ips_crtc_disable)
		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		DC_LOG_IPS(
			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
			__func__,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit);

		dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);

		memset(&new_signals, 0, sizeof(new_signals));

		new_signals.bits.allow_idle = 1; /* always set */

		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
			/* New in IPSv2.0 */
			new_signals.bits.allow_ips1z8 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			/* IPSv1.0 only */
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			/* IPSv1.0 only */
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
			/* TODO: Move this logic out to hwseq */
			if (count_active_streams(dc) == 0) {
				/* IPS2 - Display off */
				new_signals.bits.allow_pg = 1;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 1;
				new_signals.bits.allow_z10 = 1;
				/* New in IPSv2.0 */
				new_signals.bits.allow_ips1z8 = 1;
			} else {
				/* RCG only */
				new_signals.bits.allow_pg = 0;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 0;
				new_signals.bits.allow_z10 = 0;
			}
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		}

		/* Setting RCG allow bits (IPSv2.0) */
		if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
			new_signals.bits.allow_ips0_rcg = 1;
			new_signals.bits.allow_ips1_rcg = 1;
		} else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
			new_signals.bits.allow_ips1_rcg = 1;
		} else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
			new_signals.bits.allow_ips0_rcg = 1;
		}

		/* IPS dynamic allow bits (IPSv2 change, vpb use case) */
		if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
			new_signals.bits.allow_dynamic_ips1 = 1;
		} else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
			new_signals.bits.allow_dynamic_ips1 = 1;
			new_signals.bits.allow_dynamic_ips1_z8 = 1;
		}

		ips_driver->signals = new_signals;
		dc_dmub_srv->driver_signals = ips_driver->signals;
	}

	DC_LOG_IPS(
		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
		__func__,
		allow_idle,
		ips_fw->signals.bits.ips1_commit,
		ips_fw->signals.bits.ips2_commit);

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);

	/* Register access should stop at this point. */
	if (allow_idle)
		dc_dmub_srv->needs_idle_wake = true;
}

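/*
 * Bring DMCUB and DCN out of the driver-allowed low power state: clear the
 * driver allow signals, run the IPS2 exit handshake with PMFW through
 * clk_mgr (using the eval/entry delays tuned via dc->debug), resync the
 * DMUB inboxes, and wait for the firmware IPS1 commit bit to clear.
 */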
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv;
	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	if (dc->clk_mgr->funcs->exit_low_power_state) {
		volatile const struct dmub_shared_state_ips_fw *ips_fw =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;

		rcg_exit_count = ips_fw->rcg_exit_count;
		ips1_exit_count = ips_fw->ips1_exit_count;
		ips2_exit_count = ips_fw->ips2_exit_count;
		ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;

		ips_driver->signals.all = 0;
		dc_dmub_srv->driver_signals = ips_driver->signals;

		DC_LOG_IPS(
			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
			__func__,
			ips_driver->signals.bits.allow_ips1,
			ips_driver->signals.bits.allow_ips2,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit,
			ips_fw->signals.bits.ips1z8_commit,
			ips_fw->rcg_entry_count,
			ips_fw->ips1_entry_count,
			ips_fw->ips2_entry_count,
			ips_fw->ips1_z8ret_entry_count);

		/* Note: register access has technically not resumed for DCN here, but we
		 * need to message PMFW through our standard register interface.
		 */
		dc_dmub_srv->needs_idle_wake = false;

		if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
		    (!dc->debug.optimize_ips_handshake ||
		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
			DC_LOG_IPS(
				"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit);

			if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
				udelay(dc->debug.ips2_eval_delay_us);

			DC_LOG_IPS(
				"exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit);

			/* Tell PMFW to exit low power state */
			dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

			if (ips_fw->signals.bits.ips2_commit) {
				DC_LOG_IPS(
					"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				/* Wait for IPS2 entry upper bound */
				udelay(dc->debug.ips2_entry_delay_us);

				DC_LOG_IPS(
					"exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				DC_LOG_IPS(
					"wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				while (ips_fw->signals.bits.ips2_commit)
					udelay(1);

				DC_LOG_IPS(
					"wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
					ASSERT(0);

				DC_LOG_IPS(
					"resync inbox1 (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
			}
		}

		dc_dmub_srv_notify_idle(dc, false);
		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
			DC_LOG_IPS(
				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit,
				ips_fw->signals.bits.ips1z8_commit);

			while (ips_fw->signals.bits.ips1_commit)
				udelay(1);

			DC_LOG_IPS(
				"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit,
				ips_fw->signals.bits.ips1z8_commit);
		}
	}

	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
		ASSERT(0);

	DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
		__func__,
		rcg_exit_count,
		ips1_exit_count,
		ips2_exit_count,
		ips1z8_exit_count);
}

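/*
 * Of the two power-state helpers that follow, this one only records the
 * new D-state with the DMUB service layer, while
 * dc_dmub_srv_notify_fw_dc_power_state() additionally informs the firmware
 * itself via a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
 */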
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
	struct dmub_srv *dmub;

	if (!dc_dmub_srv)
		return;

	dmub = dc_dmub_srv->dmub;

	if (power_state == DC_ACPI_CM_POWER_STATE_D0)
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
	else
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}

void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
					  enum dc_acpi_cm_power_state power_state)
{
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);

	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
	} else {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
	}

	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	bool reallow_idle = false, should_detect = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->dmub->shared_state &&
	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		return ips_fw->signals.bits.detection_required;
	}

	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
		reallow_idle = true;
	}

	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);

	/* Re-enter idle if we're not about to immediately redetect links. */
	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);

	return should_detect;
}

void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	allow_idle &= (!dc->debug.ips_disallow_entry);

	if (dc_dmub_srv->idle_allowed == allow_idle)
		return;

	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);

	/*
	 * Entering a low power state requires a driver notification.
	 * Powering up the hardware requires notifying PMFW and DMCUB.
	 * Clearing the driver idle allow requires a DMCUB command.
	 * DMCUB commands require the DMCUB to be powered up and restored.
	 */

	if (!allow_idle) {
		dc_dmub_srv->idle_exit_counter += 1;

		dc_dmub_srv_exit_low_power_state(dc);
		/*
		 * Idle is considered fully exited only after the sequence above
		 * fully completes. If we have a race of two threads exiting
		 * at the same time then it's safe to perform the sequence
		 * twice as long as we're not re-entering.
		 *
		 * Infinite command submission is avoided by using the
		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
		 */
		dc_dmub_srv->idle_allowed = false;

		dc_dmub_srv->idle_exit_counter -= 1;
		if (dc_dmub_srv->idle_exit_counter < 0) {
			ASSERT(0);
			dc_dmub_srv->idle_exit_counter = 0;
		}
	} else {
		/* Consider idle as notified prior to the actual submission to
		 * prevent multiple entries.
		 */
		dc_dmub_srv->idle_allowed = true;

		dc_dmub_srv_notify_idle(dc, allow_idle);
	}
}

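/*
 * Wake-aware single-command submission: exits idle if needed, executes the
 * command through the DM layer, then re-allows idle when safe. A minimal
 * usage sketch (the command type here is only an example):
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.cmd_common.header.type = DMUB_CMD__IDLE_OPT;
 *	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */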
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
				  enum dm_dmub_wait_type wait_type)
{
	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
}

bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (count == 0)
		return true;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	/*
	 * These may have different implementations in DM, so ensure
	 * that we guide it to the expected helper.
	 */
	if (count > 1)
		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
	else
		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);

	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}

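/*
 * GPINT commands are carried over a register-based mailbox rather than the
 * ring buffer. Note the NO_WAIT quirk: a DMUB_STATUS_TIMEOUT is treated as
 * success when the caller asked not to wait, since the command was still
 * issued to the firmware.
 */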
static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
	enum dmub_status status;

	if (response)
		*response = 0;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
	if (status != DMUB_STATUS_OK) {
		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
			return true;

		return false;
	}

	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);

	return true;
}

bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);

	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}

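/*
 * FAMS2 (major version 2) configuration over the ring buffer: one global
 * config command plus a base-state and a sub-state command per stream,
 * chained via multi_cmd_pending so the firmware consumes the list as a
 * single transaction.
 */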
static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t num_cmds = 1;
	uint32_t i;
	union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;

	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));

	/* fill in generic command header */
	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
	global_cmd->header.payload_bytes =
			sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);

	if (enable) {
		/* send global configuration parameters */
		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));

		/* copy static feature configuration overrides */
		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;

		/* construct per-stream configs */
		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
			struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
			struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;

			/* configure command header */
			stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_base_cmd->header.payload_bytes =
					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_base_cmd->header.multi_cmd_pending = 1;
			stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_sub_state_cmd->header.payload_bytes =
					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_sub_state_cmd->header.multi_cmd_pending = 1;

			/* copy stream static base state */
			memcpy(&stream_base_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
					sizeof(union dmub_cmd_fams2_config));

			/* copy stream static sub state */
			memcpy(&stream_sub_state_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
					sizeof(union dmub_cmd_fams2_config));
		}
	}

	/* apply feature configuration based on current driver state */
	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
	global_cmd->config.global.features.bits.enable = enable;

	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
		/* set multi pending for global, and unset for last stream cmd */
		global_cmd->header.multi_cmd_pending = 1;
		cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
		num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
	}

	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

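/*
 * FAMS2 (major version 3) configuration via indirect buffer: the whole
 * config is staged in the ib_mem_gart allocation and a single command
 * hands the firmware that buffer's GPU address and size.
 */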
static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
	union dmub_rb_cmd cmd;
	uint32_t i;

	memset(config, 0, sizeof(*config));
	memset(&cmd, 0, sizeof(cmd));

	cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;

	cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
	cmd.ib_fams2_config.ib_data.size = sizeof(*config);

	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
		/* send global configuration parameters */
		memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
			sizeof(struct dmub_cmd_fams2_global_config));

		/* copy static feature configuration overrides after the global
		 * copy so the memcpy above does not clobber them
		 */
		config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
		config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
		config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;

		/* construct per-stream configs */
		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
			/* copy stream static base state */
			memcpy(&config->stream_v1[i].base,
				&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
				sizeof(config->stream_v1[i].base));

			/* copy stream static sub-state */
			memcpy(&config->stream_v1[i].sub_state,
				&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
				sizeof(config->stream_v1[i].sub_state));
		}
	}

	config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
	config->global.features.bits.enable = enable;

	dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

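/*
 * Dispatch on the configured FAMS major version: 2 takes the ring-buffer
 * path and 3 takes the indirect-buffer path; any other version is a no-op.
 */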
void dc_dmub_srv_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	if (dc->debug.fams_version.major == 2)
		dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
	if (dc->debug.fams_version.major == 3)
		dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
}

void dc_dmub_srv_fams2_drr_update(struct dc *dc,
		uint32_t tg_inst,
		uint32_t vtotal_min,
		uint32_t vtotal_max,
		uint32_t vtotal_mid,
		uint32_t vtotal_mid_frame_num,
		bool program_manual_trigger)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;

	cmd.fams2_drr_update.header.payload_bytes =
			sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

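/*
 * Build one FAMS2 flip command per surface update that carries an address
 * change, chain them with multi_cmd_pending, and submit the list so the
 * firmware programs the flips on the driver's behalf.
 */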
void dc_dmub_srv_fams2_passthrough_flip(
		struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		int surface_count)
{
	int plane_index;
	union dmub_rb_cmd cmds[MAX_PLANES];
	struct dc_plane_address *address;
	struct dc_plane_state *plane_state;
	int num_cmds = 0;
	struct dc_stream_status *stream_status = dc_stream_get_status(stream);

	if (surface_count <= 0 || stream_status == NULL)
		return;

	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);

	/* build command for each surface update */
	for (plane_index = 0; plane_index < surface_count; plane_index++) {
		plane_state = srf_updates[plane_index].surface;
		address = &plane_state->address;

		/* skip if there is no address update for plane */
		if (!srf_updates[plane_index].flip_addr)
			continue;

		/* build command header */
		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
		cmds[num_cmds].fams2_flip.header.payload_bytes =
				sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);

		/* when chaining multiple commands, all but the last command should set this to 1 */
		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;

		/* set topology info */
		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
		if (stream_status)
			cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;

		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;

		/* build address info for command */
		switch (address->type) {
		case PLN_ADDR_TYPE_GRAPHICS:
			if (address->grph.addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
					address->grph.meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
					(uint16_t)address->grph.meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
					address->grph.addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
					(uint16_t)address->grph.addr.high_part;
			break;
		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
			if (address->video_progressive.luma_addr.quad_part == 0 ||
				address->video_progressive.chroma_addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
					address->video_progressive.luma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
					(uint16_t)address->video_progressive.luma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
					address->video_progressive.chroma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
					(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
					address->video_progressive.luma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
					(uint16_t)address->video_progressive.luma_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
					address->video_progressive.chroma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
					(uint16_t)address->video_progressive.chroma_addr.high_part;
			break;
		default:
			/* should never be hit */
			BREAK_TO_DEBUGGER();
			break;
		}

		num_cmds++;
	}

	if (num_cmds > 0) {
		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
	cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
	cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);

	/* only panel_inst=0 is supported at the moment */
	cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
	cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;

	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return false;

	return true;
}

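/*
 * Residency info is returned indirectly: the command carries the GPU
 * address of scratch_mem_fb, the firmware fills that buffer, and a nonzero
 * ret_status indicates the contents are valid to copy back out.
 */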
bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
					  enum ips_residency_mode ips_mode)
{
	union dmub_rb_cmd cmd;
	uint32_t bytes = sizeof(struct dmub_ips_residency_info);

	dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
	memset(&cmd, 0, sizeof(cmd));

	cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
	cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
	cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);

	cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
	cmd.ips_query_residency_info.info_data.size = bytes;
	cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
	cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;

	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
	    cmd.ips_query_residency_info.header.ret_status == 0)
		return false;

	/* copy the result to the output since ret_status != 0 means the command returned data */
	memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);

	return true;
}

bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
	lsdma_data->u.init_data.ring_size               = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Init failed in DMUB\n");

	return result;
}

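/*
 * Note the N - 1 size encoding below. A minimal usage sketch, assuming two
 * hypothetical, LSDMA-accessible GPU addresses:
 *
 *	// copies 4096 bytes; the packet's count field is written as 4095
 *	dmub_lsdma_send_linear_copy_packet(dc_dmub_srv, src_gpu_addr,
 *					   dst_gpu_addr, 4096);
 */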
bool dmub_lsdma_send_linear_copy_packet(
	struct dc_dmub_srv *dc_dmub_srv,
	uint64_t src_addr,
	uint64_t dst_addr,
	uint32_t count)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.linear_copy_data.count   = count - 1; // LSDMA controller expects byte count minus one
	lsdma_data->u.linear_copy_data.src_lo  = src_addr & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.src_hi  = (src_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.dst_lo  = dst_addr & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.dst_hi  = (dst_addr >> 32) & 0xFFFFFFFF;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Linear Copy failed in DMUB\n");

	return result;
}

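/*
 * Tiled-to-tiled copy: widths and heights use the same N - 1 encoding as
 * the linear path, and source and destination share the swizzle mode and
 * element size supplied in params.
 */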
bool dmub_lsdma_send_tiled_to_tiled_copy_command(
	struct dc_dmub_srv *dc_dmub_srv,
	struct lsdma_send_tiled_to_tiled_copy_command_params params)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.tiled_copy_data.src_addr_lo      = params.src_addr & 0xFFFFFFFF;
	lsdma_data->u.tiled_copy_data.src_addr_hi      = (params.src_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.tiled_copy_data.dst_addr_lo      = params.dst_addr & 0xFFFFFFFF;
	lsdma_data->u.tiled_copy_data.dst_addr_hi      = (params.dst_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.tiled_copy_data.src_x            = params.src_x;
	lsdma_data->u.tiled_copy_data.src_y            = params.src_y;
	lsdma_data->u.tiled_copy_data.dst_x            = params.dst_x;
	lsdma_data->u.tiled_copy_data.dst_y            = params.dst_y;
	lsdma_data->u.tiled_copy_data.src_width        = params.src_width - 1; // LSDMA controller expects width minus one
	lsdma_data->u.tiled_copy_data.dst_width        = params.dst_width - 1; // LSDMA controller expects width minus one
	lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
	lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
	lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
	lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
	lsdma_data->u.tiled_copy_data.rect_x           = params.rect_x;
	lsdma_data->u.tiled_copy_data.rect_y           = params.rect_y;
	lsdma_data->u.tiled_copy_data.dcc              = params.dcc;
	lsdma_data->u.tiled_copy_data.tmz              = params.tmz;
	lsdma_data->u.tiled_copy_data.read_compress    = params.read_compress;
	lsdma_data->u.tiled_copy_data.write_compress   = params.write_compress;
	lsdma_data->u.tiled_copy_data.src_height       = params.src_height - 1; // LSDMA controller expects height minus one
	lsdma_data->u.tiled_copy_data.dst_height       = params.dst_height - 1; // LSDMA controller expects height minus one
	lsdma_data->u.tiled_copy_data.data_format      = params.data_format;
	lsdma_data->u.tiled_copy_data.max_com          = params.max_com;
	lsdma_data->u.tiled_copy_data.max_uncom        = params.max_uncom;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB\n");

	return result;
}

bool dmub_lsdma_send_pio_copy_command(
	struct dc_dmub_srv *dc_dmub_srv,
	uint64_t src_addr,
	uint64_t dst_addr,
	uint32_t byte_count,
	uint32_t overlap_disable)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.pio_copy_data.packet.fields.byte_count      = byte_count;
	lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
	lsdma_data->u.pio_copy_data.src_lo                        = src_addr & 0xFFFFFFFF;
	lsdma_data->u.pio_copy_data.src_hi                        = (src_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.pio_copy_data.dst_lo                        = dst_addr & 0xFFFFFFFF;
	lsdma_data->u.pio_copy_data.dst_hi                        = (dst_addr >> 32) & 0xFFFFFFFF;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA PIO Copy failed in DMUB\n");

	return result;
}

bool dmub_lsdma_send_pio_constfill_command(
	struct dc_dmub_srv *dc_dmub_srv,
	uint64_t dst_addr,
	uint32_t byte_count,
	uint32_t data)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
	lsdma_data->u.pio_constfill_data.packet.fields.byte_count    = byte_count;
	lsdma_data->u.pio_constfill_data.dst_lo                      = dst_addr & 0xFFFFFFFF;
	lsdma_data->u.pio_constfill_data.dst_hi                      = (dst_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.pio_constfill_data.data                        = data;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA PIO Constfill failed in DMUB\n");

	return result;
}

bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.reg_write_data.reg_addr = reg_addr;
	lsdma_data->u.reg_write_data.reg_data = reg_data;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Poll Reg Write failed in DMUB\n");

	return result;
}

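/*
 * Tell DMUB the driver is releasing hardware control (e.g. during
 * teardown) via the IDLE_OPT_RELEASE_HW sub-command. Safe to call when no
 * DMUB service exists.
 */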
void dc_dmub_srv_release_hw(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}