1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "../dmub_srv.h"
27 #include "dmub_dcn20.h"
28 #include "dmub_dcn21.h"
29 #include "dmub_cmd.h"
30 #include "dmub_dcn30.h"
31 #include "dmub_dcn301.h"
32 #include "dmub_dcn302.h"
33 #include "dmub_dcn303.h"
34 #include "dmub_dcn31.h"
35 #include "dmub_dcn314.h"
36 #include "dmub_dcn315.h"
37 #include "dmub_dcn316.h"
38 #include "dmub_dcn32.h"
39 #include "dmub_dcn35.h"
40 #include "dmub_dcn351.h"
41 #include "dmub_dcn36.h"
42 #include "dmub_dcn401.h"
43 #include "os_types.h"
44 /*
45 * Note: the DMUB service is standalone. No additional headers should be
46 * added below or above this line unless they reside within the DMUB
47 * folder.
48 */
49
50 /* Alignment for framebuffer memory. */
51 #define DMUB_FB_ALIGNMENT (1024 * 1024)
52
53 /* Stack size. */
54 #define DMUB_STACK_SIZE (128 * 1024)
55
56 /* Context size. */
57 #define DMUB_CONTEXT_SIZE (512 * 1024)
58
59 /* Mailbox size : Ring buffers are required for both inbox and outbox */
60 #define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))
61
62 /* Default state size if meta is absent. */
63 #define DMUB_FW_STATE_SIZE (64 * 1024)
64
65 /* Default scratch mem size. */
66 #define DMUB_SCRATCH_MEM_SIZE (1024)
67
68 /* Default indirect buffer size. */
69 #define DMUB_IB_MEM_SIZE (1280)
70
71 /* Default LSDMA ring buffer size. */
72 #define DMUB_LSDMA_RB_SIZE (64 * 1024)
73
74 /* Number of windows in use. */
75 #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
76 /* Base addresses. */
77
78 #define DMUB_CW0_BASE (0x60000000)
79 #define DMUB_CW1_BASE (0x61000000)
80 #define DMUB_CW3_BASE (0x63000000)
81 #define DMUB_CW4_BASE (0x64000000)
82 #define DMUB_CW5_BASE (0x65000000)
83 #define DMUB_CW6_BASE (0x66000000)
84
85 #define DMUB_REGION5_BASE (0xA0000000)
86 #define DMUB_REGION6_BASE (0xC0000000)
87
88 static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
89 static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
90
/* Round @val up to the next multiple of @factor. */
static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	return ((val + (factor - 1)) / factor) * factor;
}
95
dmub_flush_buffer_mem(const struct dmub_fb * fb)96 void dmub_flush_buffer_mem(const struct dmub_fb *fb)
97 {
98 const uint8_t *base = (const uint8_t *)fb->cpu_addr;
99 uint8_t buf[64];
100 uint32_t pos, end;
101
102 /**
103 * Read 64-byte chunks since we don't want to store a
104 * large temporary buffer for this purpose.
105 */
106 end = fb->size / sizeof(buf) * sizeof(buf);
107
108 for (pos = 0; pos < end; pos += sizeof(buf))
109 dmub_memcpy(buf, base + pos, sizeof(buf));
110
111 /* Read anything leftover into the buffer. */
112 if (end < fb->size)
113 dmub_memcpy(buf, base + pos, fb->size - end);
114 }
115
116 static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info_from_blob(const uint8_t * blob,uint32_t blob_size,uint32_t meta_offset)117 dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
118 {
119 const union dmub_fw_meta *meta;
120
121 if (!blob || !blob_size)
122 return NULL;
123
124 if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
125 return NULL;
126
127 meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
128 sizeof(union dmub_fw_meta));
129
130 if (meta->info.magic_value != DMUB_FW_META_MAGIC)
131 return NULL;
132
133 return &meta->info;
134 }
135
136 static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const struct dmub_srv_region_params * params)137 dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
138 {
139 const struct dmub_fw_meta_info *info = NULL;
140
141 if (params->fw_bss_data && params->bss_data_size) {
142 /* Legacy metadata region. */
143 info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
144 params->bss_data_size,
145 DMUB_FW_META_OFFSET);
146 } else if (params->fw_inst_const && params->inst_const_size) {
147 /* Combined metadata region - can be aligned to 16-bytes. */
148 uint32_t i;
149
150 for (i = 0; i < 16; ++i) {
151 info = dmub_get_fw_meta_info_from_blob(
152 params->fw_inst_const, params->inst_const_size, i);
153
154 if (info)
155 break;
156 }
157 }
158
159 return info;
160 }
161
dmub_srv_hw_setup(struct dmub_srv * dmub,enum dmub_asic asic)162 static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
163 {
164 struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
165
166 /* default to specifying now inbox type */
167 enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
168
169 switch (asic) {
170 case DMUB_ASIC_DCN20:
171 case DMUB_ASIC_DCN21:
172 case DMUB_ASIC_DCN30:
173 case DMUB_ASIC_DCN301:
174 case DMUB_ASIC_DCN302:
175 case DMUB_ASIC_DCN303:
176 dmub->regs = &dmub_srv_dcn20_regs;
177
178 funcs->reset = dmub_dcn20_reset;
179 funcs->reset_release = dmub_dcn20_reset_release;
180 funcs->backdoor_load = dmub_dcn20_backdoor_load;
181 funcs->setup_windows = dmub_dcn20_setup_windows;
182 funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
183 funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
184 funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
185 funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
186 funcs->is_supported = dmub_dcn20_is_supported;
187 funcs->is_hw_init = dmub_dcn20_is_hw_init;
188 funcs->set_gpint = dmub_dcn20_set_gpint;
189 funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
190 funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
191 funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
192 funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
193 funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
194 funcs->get_current_time = dmub_dcn20_get_current_time;
195
196 // Out mailbox register access functions for RN and above
197 funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
198 funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
199 funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;
200
201 //outbox0 call stacks
202 funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
203 funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
204 funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;
205
206 funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;
207
208 if (asic == DMUB_ASIC_DCN21)
209 dmub->regs = &dmub_srv_dcn21_regs;
210
211 if (asic == DMUB_ASIC_DCN30) {
212 dmub->regs = &dmub_srv_dcn30_regs;
213
214 funcs->backdoor_load = dmub_dcn30_backdoor_load;
215 funcs->setup_windows = dmub_dcn30_setup_windows;
216 }
217 if (asic == DMUB_ASIC_DCN301) {
218 dmub->regs = &dmub_srv_dcn301_regs;
219
220 funcs->backdoor_load = dmub_dcn30_backdoor_load;
221 funcs->setup_windows = dmub_dcn30_setup_windows;
222 }
223 if (asic == DMUB_ASIC_DCN302) {
224 dmub->regs = &dmub_srv_dcn302_regs;
225
226 funcs->backdoor_load = dmub_dcn30_backdoor_load;
227 funcs->setup_windows = dmub_dcn30_setup_windows;
228 }
229 if (asic == DMUB_ASIC_DCN303) {
230 dmub->regs = &dmub_srv_dcn303_regs;
231
232 funcs->backdoor_load = dmub_dcn30_backdoor_load;
233 funcs->setup_windows = dmub_dcn30_setup_windows;
234 }
235 break;
236
237 case DMUB_ASIC_DCN31:
238 case DMUB_ASIC_DCN31B:
239 case DMUB_ASIC_DCN314:
240 case DMUB_ASIC_DCN315:
241 case DMUB_ASIC_DCN316:
242 if (asic == DMUB_ASIC_DCN314) {
243 dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
244 funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
245 } else if (asic == DMUB_ASIC_DCN315) {
246 dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
247 } else if (asic == DMUB_ASIC_DCN316) {
248 dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
249 } else {
250 dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
251 funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
252 }
253 funcs->reset = dmub_dcn31_reset;
254 funcs->reset_release = dmub_dcn31_reset_release;
255 funcs->backdoor_load = dmub_dcn31_backdoor_load;
256 funcs->setup_windows = dmub_dcn31_setup_windows;
257 funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
258 funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
259 funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
260 funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
261 funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
262 funcs->get_outbox1_wptr = dmub_dcn31_get_outbox1_wptr;
263 funcs->set_outbox1_rptr = dmub_dcn31_set_outbox1_rptr;
264 funcs->is_supported = dmub_dcn31_is_supported;
265 funcs->is_hw_init = dmub_dcn31_is_hw_init;
266 funcs->set_gpint = dmub_dcn31_set_gpint;
267 funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked;
268 funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
269 funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
270 funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
271 funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option;
272 funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
273 funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
274 //outbox0 call stacks
275 funcs->setup_outbox0 = dmub_dcn31_setup_outbox0;
276 funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
277 funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
278
279 funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
280 funcs->should_detect = dmub_dcn31_should_detect;
281 funcs->get_current_time = dmub_dcn31_get_current_time;
282
283 break;
284
285 case DMUB_ASIC_DCN32:
286 case DMUB_ASIC_DCN321:
287 dmub->regs_dcn32 = &dmub_srv_dcn32_regs;
288 funcs->configure_dmub_in_system_memory = dmub_dcn32_configure_dmub_in_system_memory;
289 funcs->send_inbox0_cmd = dmub_dcn32_send_inbox0_cmd;
290 funcs->clear_inbox0_ack_register = dmub_dcn32_clear_inbox0_ack_register;
291 funcs->read_inbox0_ack_register = dmub_dcn32_read_inbox0_ack_register;
292 funcs->subvp_save_surf_addr = dmub_dcn32_save_surf_addr;
293 funcs->reset = dmub_dcn32_reset;
294 funcs->reset_release = dmub_dcn32_reset_release;
295 funcs->backdoor_load = dmub_dcn32_backdoor_load;
296 funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
297 funcs->setup_windows = dmub_dcn32_setup_windows;
298 funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
299 funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
300 funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
301 funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
302 funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
303 funcs->get_outbox1_wptr = dmub_dcn32_get_outbox1_wptr;
304 funcs->set_outbox1_rptr = dmub_dcn32_set_outbox1_rptr;
305 funcs->is_supported = dmub_dcn32_is_supported;
306 funcs->is_hw_init = dmub_dcn32_is_hw_init;
307 funcs->set_gpint = dmub_dcn32_set_gpint;
308 funcs->is_gpint_acked = dmub_dcn32_is_gpint_acked;
309 funcs->get_gpint_response = dmub_dcn32_get_gpint_response;
310 funcs->get_gpint_dataout = dmub_dcn32_get_gpint_dataout;
311 funcs->get_fw_status = dmub_dcn32_get_fw_boot_status;
312 funcs->enable_dmub_boot_options = dmub_dcn32_enable_dmub_boot_options;
313 funcs->skip_dmub_panel_power_sequence = dmub_dcn32_skip_dmub_panel_power_sequence;
314
315 /* outbox0 call stacks */
316 funcs->setup_outbox0 = dmub_dcn32_setup_outbox0;
317 funcs->get_outbox0_wptr = dmub_dcn32_get_outbox0_wptr;
318 funcs->set_outbox0_rptr = dmub_dcn32_set_outbox0_rptr;
319 funcs->get_current_time = dmub_dcn32_get_current_time;
320 funcs->get_diagnostic_data = dmub_dcn32_get_diagnostic_data;
321 funcs->init_reg_offsets = dmub_srv_dcn32_regs_init;
322
323 break;
324
325 case DMUB_ASIC_DCN35:
326 case DMUB_ASIC_DCN351:
327 case DMUB_ASIC_DCN36:
328 dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
329 funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
330 funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
331 funcs->clear_inbox0_ack_register = dmub_dcn35_clear_inbox0_ack_register;
332 funcs->read_inbox0_ack_register = dmub_dcn35_read_inbox0_ack_register;
333 funcs->reset = dmub_dcn35_reset;
334 funcs->reset_release = dmub_dcn35_reset_release;
335 funcs->backdoor_load = dmub_dcn35_backdoor_load;
336 funcs->backdoor_load_zfb_mode = dmub_dcn35_backdoor_load_zfb_mode;
337 funcs->setup_windows = dmub_dcn35_setup_windows;
338 funcs->setup_mailbox = dmub_dcn35_setup_mailbox;
339 funcs->get_inbox1_wptr = dmub_dcn35_get_inbox1_wptr;
340 funcs->get_inbox1_rptr = dmub_dcn35_get_inbox1_rptr;
341 funcs->set_inbox1_wptr = dmub_dcn35_set_inbox1_wptr;
342 funcs->setup_out_mailbox = dmub_dcn35_setup_out_mailbox;
343 funcs->get_outbox1_wptr = dmub_dcn35_get_outbox1_wptr;
344 funcs->set_outbox1_rptr = dmub_dcn35_set_outbox1_rptr;
345 funcs->is_supported = dmub_dcn35_is_supported;
346 funcs->is_hw_init = dmub_dcn35_is_hw_init;
347 funcs->set_gpint = dmub_dcn35_set_gpint;
348 funcs->is_gpint_acked = dmub_dcn35_is_gpint_acked;
349 funcs->get_gpint_response = dmub_dcn35_get_gpint_response;
350 funcs->get_gpint_dataout = dmub_dcn35_get_gpint_dataout;
351 funcs->get_fw_status = dmub_dcn35_get_fw_boot_status;
352 funcs->get_fw_boot_option = dmub_dcn35_get_fw_boot_option;
353 funcs->enable_dmub_boot_options = dmub_dcn35_enable_dmub_boot_options;
354 funcs->skip_dmub_panel_power_sequence = dmub_dcn35_skip_dmub_panel_power_sequence;
355 //outbox0 call stacks
356 funcs->setup_outbox0 = dmub_dcn35_setup_outbox0;
357 funcs->get_outbox0_wptr = dmub_dcn35_get_outbox0_wptr;
358 funcs->set_outbox0_rptr = dmub_dcn35_set_outbox0_rptr;
359
360 funcs->get_current_time = dmub_dcn35_get_current_time;
361 funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
362
363 funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
364 if (asic == DMUB_ASIC_DCN351)
365 funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
366 if (asic == DMUB_ASIC_DCN36)
367 funcs->init_reg_offsets = dmub_srv_dcn36_regs_init;
368
369 funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
370 funcs->should_detect = dmub_dcn35_should_detect;
371 break;
372
373 case DMUB_ASIC_DCN401:
374 dmub->regs_dcn401 = &dmub_srv_dcn401_regs;
375 funcs->configure_dmub_in_system_memory = dmub_dcn401_configure_dmub_in_system_memory;
376 funcs->send_inbox0_cmd = dmub_dcn401_send_inbox0_cmd;
377 funcs->clear_inbox0_ack_register = dmub_dcn401_clear_inbox0_ack_register;
378 funcs->read_inbox0_ack_register = dmub_dcn401_read_inbox0_ack_register;
379 funcs->reset = dmub_dcn401_reset;
380 funcs->reset_release = dmub_dcn401_reset_release;
381 funcs->backdoor_load = dmub_dcn401_backdoor_load;
382 funcs->backdoor_load_zfb_mode = dmub_dcn401_backdoor_load_zfb_mode;
383 funcs->setup_windows = dmub_dcn401_setup_windows;
384 funcs->setup_mailbox = dmub_dcn401_setup_mailbox;
385 funcs->get_inbox1_wptr = dmub_dcn401_get_inbox1_wptr;
386 funcs->get_inbox1_rptr = dmub_dcn401_get_inbox1_rptr;
387 funcs->set_inbox1_wptr = dmub_dcn401_set_inbox1_wptr;
388 funcs->setup_out_mailbox = dmub_dcn401_setup_out_mailbox;
389 funcs->get_outbox1_wptr = dmub_dcn401_get_outbox1_wptr;
390 funcs->set_outbox1_rptr = dmub_dcn401_set_outbox1_rptr;
391 funcs->is_supported = dmub_dcn401_is_supported;
392 funcs->is_hw_init = dmub_dcn401_is_hw_init;
393 funcs->set_gpint = dmub_dcn401_set_gpint;
394 funcs->is_gpint_acked = dmub_dcn401_is_gpint_acked;
395 funcs->get_gpint_response = dmub_dcn401_get_gpint_response;
396 funcs->get_gpint_dataout = dmub_dcn401_get_gpint_dataout;
397 funcs->get_fw_status = dmub_dcn401_get_fw_boot_status;
398 funcs->enable_dmub_boot_options = dmub_dcn401_enable_dmub_boot_options;
399 funcs->skip_dmub_panel_power_sequence = dmub_dcn401_skip_dmub_panel_power_sequence;
400 //outbox0 call stacks
401 funcs->setup_outbox0 = dmub_dcn401_setup_outbox0;
402 funcs->get_outbox0_wptr = dmub_dcn401_get_outbox0_wptr;
403 funcs->set_outbox0_rptr = dmub_dcn401_set_outbox0_rptr;
404
405 funcs->get_current_time = dmub_dcn401_get_current_time;
406 funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
407
408 funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
409 funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
410 funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
411 funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
412 funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
413 funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
414 default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
415
416 funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
417 funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
418 funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
419 funcs->read_reg_outbox0_rdy_int_status = dmub_dcn401_read_reg_outbox0_rdy_int_status;
420 funcs->read_reg_outbox0_rsp_int_status = dmub_dcn401_read_reg_outbox0_rsp_int_status;
421 funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
422 funcs->enable_reg_outbox0_rdy_int = dmub_dcn401_enable_reg_outbox0_rdy_int;
423 break;
424 default:
425 return false;
426 }
427
428 /* set default inbox type if not overriden */
429 if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
430 if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
431 /* use default inbox type as specified by DCN rev */
432 dmub->inbox_type = default_inbox_type;
433 } else if (funcs->send_reg_inbox0_cmd_msg) {
434 /* prefer reg as default inbox type if present */
435 dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
436 } else {
437 /* use fb as fallback */
438 dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
439 }
440 }
441
442 return true;
443 }
444
dmub_srv_create(struct dmub_srv * dmub,const struct dmub_srv_create_params * params)445 enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
446 const struct dmub_srv_create_params *params)
447 {
448 enum dmub_status status = DMUB_STATUS_OK;
449
450 dmub_memset(dmub, 0, sizeof(*dmub));
451
452 dmub->funcs = params->funcs;
453 dmub->user_ctx = params->user_ctx;
454 dmub->asic = params->asic;
455 dmub->fw_version = params->fw_version;
456 dmub->is_virtual = params->is_virtual;
457 dmub->inbox_type = params->inbox_type;
458
459 /* Setup asic dependent hardware funcs. */
460 if (!dmub_srv_hw_setup(dmub, params->asic)) {
461 status = DMUB_STATUS_INVALID;
462 goto cleanup;
463 }
464
465 /* Override (some) hardware funcs based on user params. */
466 if (params->hw_funcs) {
467 if (params->hw_funcs->emul_get_inbox1_rptr)
468 dmub->hw_funcs.emul_get_inbox1_rptr =
469 params->hw_funcs->emul_get_inbox1_rptr;
470
471 if (params->hw_funcs->emul_set_inbox1_wptr)
472 dmub->hw_funcs.emul_set_inbox1_wptr =
473 params->hw_funcs->emul_set_inbox1_wptr;
474
475 if (params->hw_funcs->is_supported)
476 dmub->hw_funcs.is_supported =
477 params->hw_funcs->is_supported;
478 }
479
480 /* Sanity checks for required hw func pointers. */
481 if (!dmub->hw_funcs.get_inbox1_rptr ||
482 !dmub->hw_funcs.set_inbox1_wptr) {
483 status = DMUB_STATUS_INVALID;
484 goto cleanup;
485 }
486
487 cleanup:
488 if (status == DMUB_STATUS_OK)
489 dmub->sw_init = true;
490 else
491 dmub_srv_destroy(dmub);
492
493 return status;
494 }
495
/**
 * dmub_srv_destroy() - tear down a dmub service instance
 * @dmub: the instance to destroy
 *
 * Clears all software state; dmub_srv_create() also calls this on its
 * failure path, so it must be safe on a partially initialized instance.
 */
void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}
500
dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params * params,struct dmub_srv_region_info * out,const uint32_t * window_sizes,enum dmub_window_memory_type memory_type)501 static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
502 struct dmub_srv_region_info *out,
503 const uint32_t *window_sizes,
504 enum dmub_window_memory_type memory_type)
505 {
506 uint32_t i, top = 0;
507
508 for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
509 if (params->window_memory_type[i] == memory_type) {
510 struct dmub_region *region = &out->regions[i];
511
512 region->base = dmub_align(top, 256);
513 region->top = region->base + dmub_align(window_sizes[i], 64);
514 top = region->top;
515 }
516 }
517
518 return dmub_align(top, 4096);
519 }
520
521 enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv * dmub,const struct dmub_srv_region_params * params,struct dmub_srv_region_info * out)522 dmub_srv_calc_region_info(struct dmub_srv *dmub,
523 const struct dmub_srv_region_params *params,
524 struct dmub_srv_region_info *out)
525 {
526 const struct dmub_fw_meta_info *fw_info;
527 uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
528 uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
529 uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE;
530 uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
531
532 if (!dmub->sw_init)
533 return DMUB_STATUS_INVALID;
534
535 memset(out, 0, sizeof(*out));
536 memset(window_sizes, 0, sizeof(window_sizes));
537
538 out->num_regions = DMUB_NUM_WINDOWS;
539
540 fw_info = dmub_get_fw_meta_info(params);
541
542 if (fw_info) {
543 memcpy(&dmub->meta_info, fw_info, sizeof(*fw_info));
544
545 fw_state_size = fw_info->fw_region_size;
546 trace_buffer_size = fw_info->trace_buffer_size;
547 shared_state_size = fw_info->shared_state_size;
548
549 /**
550 * If DM didn't fill in a version, then fill it in based on
551 * the firmware meta now that we have it.
552 *
553 * TODO: Make it easier for driver to extract this out to
554 * pass during creation.
555 */
556 if (dmub->fw_version == 0)
557 dmub->fw_version = fw_info->fw_version;
558 }
559
560 window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
561 window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
562 window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
563 window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
564 window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
565 window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
566 window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
567 window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
568 window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
569 window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
570 window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
571
572 out->fb_size =
573 dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
574
575 out->gart_size =
576 dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
577
578 return DMUB_STATUS_OK;
579 }
580
dmub_srv_calc_mem_info(struct dmub_srv * dmub,const struct dmub_srv_memory_params * params,struct dmub_srv_fb_info * out)581 enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
582 const struct dmub_srv_memory_params *params,
583 struct dmub_srv_fb_info *out)
584 {
585 uint32_t i;
586
587 if (!dmub->sw_init)
588 return DMUB_STATUS_INVALID;
589
590 memset(out, 0, sizeof(*out));
591
592 if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
593 return DMUB_STATUS_INVALID;
594
595 for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
596 const struct dmub_region *reg =
597 ¶ms->region_info->regions[i];
598
599 if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
600 out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
601 out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
602 } else {
603 out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
604 out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
605 }
606
607 out->fb[i].size = reg->top - reg->base;
608 }
609
610 out->num_fb = DMUB_NUM_WINDOWS;
611
612 return DMUB_STATUS_OK;
613 }
614
/**
 * dmub_srv_has_hw_support() - query whether the hardware supports DMUB
 * @dmub: the dmub service instance
 * @is_supported: set to the hardware's answer, or false when unknown
 *
 * Return: DMUB_STATUS_OK, or DMUB_STATUS_INVALID if sw init has not run
 * (in which case *is_supported is still written as false).
 */
enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported)
{
	bool hw_ok = false;
	enum dmub_status status = DMUB_STATUS_OK;

	if (!dmub->sw_init)
		status = DMUB_STATUS_INVALID;
	else if (dmub->hw_funcs.is_supported)
		hw_ok = dmub->hw_funcs.is_supported(dmub);

	*is_supported = hw_ok;

	return status;
}
628
/**
 * dmub_srv_is_hw_init() - query whether DMUB hardware init has completed
 * @dmub: the dmub service instance
 * @is_hw_init: set to the hardware's answer, or false when unknown
 *
 * Return: DMUB_STATUS_OK, or DMUB_STATUS_INVALID if sw init has not run.
 */
enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
{
	*is_hw_init = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	/* Only query the hardware once sw-side hw init has actually run. */
	if (dmub->hw_init && dmub->hw_funcs.is_hw_init)
		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);

	return DMUB_STATUS_OK;
}
644
dmub_srv_hw_init(struct dmub_srv * dmub,const struct dmub_srv_hw_params * params)645 enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
646 const struct dmub_srv_hw_params *params)
647 {
648 struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
649 struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
650 struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
651 struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
652 struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
653 struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
654 struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
655 struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
656 struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
657 struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
658
659 struct dmub_rb_init_params rb_params, outbox0_rb_params;
660 struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
661 struct dmub_region inbox1, outbox1, outbox0;
662
663 if (!dmub->sw_init)
664 return DMUB_STATUS_INVALID;
665
666 if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
667 !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
668 ASSERT(0);
669 return DMUB_STATUS_INVALID;
670 }
671
672 dmub->fb_base = params->fb_base;
673 dmub->fb_offset = params->fb_offset;
674 dmub->psp_version = params->psp_version;
675
676 if (dmub->hw_funcs.reset)
677 dmub->hw_funcs.reset(dmub);
678
679 /* reset the cache of the last wptr as well now that hw is reset */
680 dmub->inbox1_last_wptr = 0;
681
682 cw0.offset.quad_part = inst_fb->gpu_addr;
683 cw0.region.base = DMUB_CW0_BASE;
684 cw0.region.top = cw0.region.base + inst_fb->size - 1;
685
686 cw1.offset.quad_part = stack_fb->gpu_addr;
687 cw1.region.base = DMUB_CW1_BASE;
688 cw1.region.top = cw1.region.base + stack_fb->size - 1;
689
690 if (params->fw_in_system_memory && dmub->hw_funcs.configure_dmub_in_system_memory)
691 dmub->hw_funcs.configure_dmub_in_system_memory(dmub);
692
693 if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
694 /**
695 * Read back all the instruction memory so we don't hang the
696 * DMCUB when backdoor loading if the write from x86 hasn't been
697 * flushed yet. This only occurs in backdoor loading.
698 */
699 if (params->mem_access_type == DMUB_MEMORY_ACCESS_CPU)
700 dmub_flush_buffer_mem(inst_fb);
701
702 if (params->fw_in_system_memory && dmub->hw_funcs.backdoor_load_zfb_mode)
703 dmub->hw_funcs.backdoor_load_zfb_mode(dmub, &cw0, &cw1);
704 else
705 dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
706 }
707
708 cw2.offset.quad_part = data_fb->gpu_addr;
709 cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
710 cw2.region.top = cw2.region.base + data_fb->size;
711
712 cw3.offset.quad_part = bios_fb->gpu_addr;
713 cw3.region.base = DMUB_CW3_BASE;
714 cw3.region.top = cw3.region.base + bios_fb->size;
715
716 cw4.offset.quad_part = mail_fb->gpu_addr;
717 cw4.region.base = DMUB_CW4_BASE;
718 cw4.region.top = cw4.region.base + mail_fb->size;
719
720 /**
721 * Doubled the mailbox region to accomodate inbox and outbox.
722 * Note: Currently, currently total mailbox size is 16KB. It is split
723 * equally into 8KB between inbox and outbox. If this config is
724 * changed, then uncached base address configuration of outbox1
725 * has to be updated in funcs->setup_out_mailbox.
726 */
727 inbox1.base = cw4.region.base;
728 inbox1.top = cw4.region.base + DMUB_RB_SIZE;
729 outbox1.base = inbox1.top;
730 outbox1.top = inbox1.top + DMUB_RB_SIZE;
731
732 cw5.offset.quad_part = tracebuff_fb->gpu_addr;
733 cw5.region.base = DMUB_CW5_BASE;
734 cw5.region.top = cw5.region.base + tracebuff_fb->size;
735
736 outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
737 outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
738
739 cw6.offset.quad_part = fw_state_fb->gpu_addr;
740 cw6.region.base = DMUB_CW6_BASE;
741 cw6.region.top = cw6.region.base + fw_state_fb->size;
742
743 dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
744
745 region6.offset.quad_part = shared_state_fb->gpu_addr;
746 region6.region.base = DMUB_CW6_BASE;
747 region6.region.top = region6.region.base + shared_state_fb->size;
748
749 dmub->shared_state = shared_state_fb->cpu_addr;
750
751 dmub->scratch_mem_fb = *scratch_mem_fb;
752
753 dmub->ib_mem_gart = *ib_mem_gart;
754
755 if (dmub->hw_funcs.setup_windows)
756 dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6);
757
758 if (dmub->hw_funcs.setup_outbox0)
759 dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
760
761 if (dmub->hw_funcs.setup_mailbox)
762 dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
763 if (dmub->hw_funcs.setup_out_mailbox)
764 dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
765 if (dmub->hw_funcs.enable_reg_inbox0_rsp_int)
766 dmub->hw_funcs.enable_reg_inbox0_rsp_int(dmub, true);
767 if (dmub->hw_funcs.enable_reg_outbox0_rdy_int)
768 dmub->hw_funcs.enable_reg_outbox0_rdy_int(dmub, true);
769
770 dmub_memset(&rb_params, 0, sizeof(rb_params));
771 rb_params.ctx = dmub;
772 rb_params.base_address = mail_fb->cpu_addr;
773 rb_params.capacity = DMUB_RB_SIZE;
774 dmub_rb_init(&dmub->inbox1.rb, &rb_params);
775
776 // Initialize outbox1 ring buffer
777 rb_params.ctx = dmub;
778 rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
779 rb_params.capacity = DMUB_RB_SIZE;
780 dmub_rb_init(&dmub->outbox1_rb, &rb_params);
781
782 dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
783 outbox0_rb_params.ctx = dmub;
784 outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
785 outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
786 dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);
787
788 /* Report to DMUB what features are supported by current driver */
789 if (dmub->hw_funcs.enable_dmub_boot_options)
790 dmub->hw_funcs.enable_dmub_boot_options(dmub, params);
791
792 if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
793 dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub,
794 params->skip_panel_power_sequence);
795
796 if (dmub->hw_funcs.reset_release && !dmub->is_virtual)
797 dmub->hw_funcs.reset_release(dmub);
798
799 dmub->hw_init = true;
800 dmub->power_state = DMUB_POWER_STATE_D0;
801
802 return DMUB_STATUS_OK;
803 }
804
/**
 * dmub_srv_hw_reset() - put the DMCUB into reset and clear sw mailbox state
 * @dmub: the dmub service instance
 *
 * After asserting hardware reset, zeroes every cached ring-buffer pointer
 * and pending-command counter so the software view matches the reset
 * hardware mailboxes, and marks the instance as no longer hw-initialized.
 *
 * Return: DMUB_STATUS_OK, or DMUB_STATUS_INVALID if sw init has not run.
 */
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	/* mailboxes have been reset in hw, so reset the sw state as well */
	dmub->inbox1_last_wptr = 0;
	dmub->inbox1.rb.wrpt = 0;
	dmub->inbox1.rb.rptr = 0;
	dmub->inbox1.num_reported = 0;
	dmub->inbox1.num_submitted = 0;
	dmub->reg_inbox0.num_reported = 0;
	dmub->reg_inbox0.num_submitted = 0;
	dmub->reg_inbox0.is_pending = 0;
	dmub->outbox0_rb.wrpt = 0;
	dmub->outbox0_rb.rptr = 0;
	dmub->outbox1_rb.wrpt = 0;
	dmub->outbox1_rb.rptr = 0;

	dmub->hw_init = false;

	return DMUB_STATUS_OK;
}
831
dmub_srv_fb_cmd_queue(struct dmub_srv * dmub,const union dmub_rb_cmd * cmd)832 enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
833 const union dmub_rb_cmd *cmd)
834 {
835 if (!dmub->hw_init)
836 return DMUB_STATUS_INVALID;
837
838 if (dmub->power_state != DMUB_POWER_STATE_D0)
839 return DMUB_STATUS_POWER_STATE_D3;
840
841 if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
842 dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
843 return DMUB_STATUS_HW_FAILURE;
844 }
845
846 if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
847 dmub->inbox1.num_submitted++;
848 return DMUB_STATUS_OK;
849 }
850
851 return DMUB_STATUS_QUEUE_FULL;
852 }
853
dmub_srv_fb_cmd_execute(struct dmub_srv * dmub)854 enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
855 {
856 struct dmub_rb flush_rb;
857
858 if (!dmub->hw_init)
859 return DMUB_STATUS_INVALID;
860
861 if (dmub->power_state != DMUB_POWER_STATE_D0)
862 return DMUB_STATUS_POWER_STATE_D3;
863
864 /**
865 * Read back all the queued commands to ensure that they've
866 * been flushed to framebuffer memory. Otherwise DMCUB might
867 * read back stale, fully invalid or partially invalid data.
868 */
869 flush_rb = dmub->inbox1.rb;
870 flush_rb.rptr = dmub->inbox1_last_wptr;
871 dmub_rb_flush_pending(&flush_rb);
872
873 dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
874
875 dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
876
877 return DMUB_STATUS_OK;
878 }
879
dmub_srv_is_hw_pwr_up(struct dmub_srv * dmub)880 bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
881 {
882 if (!dmub->hw_funcs.is_hw_powered_up)
883 return true;
884
885 if (!dmub->hw_funcs.is_hw_powered_up(dmub))
886 return false;
887
888 return true;
889 }
890
dmub_srv_wait_for_hw_pwr_up(struct dmub_srv * dmub,uint32_t timeout_us)891 enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
892 uint32_t timeout_us)
893 {
894 uint32_t i;
895
896 if (!dmub->hw_init)
897 return DMUB_STATUS_INVALID;
898
899 for (i = 0; i <= timeout_us; i += 100) {
900 if (dmub_srv_is_hw_pwr_up(dmub))
901 return DMUB_STATUS_OK;
902
903 udelay(100);
904 }
905
906 return DMUB_STATUS_TIMEOUT;
907 }
908
dmub_srv_wait_for_auto_load(struct dmub_srv * dmub,uint32_t timeout_us)909 enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
910 uint32_t timeout_us)
911 {
912 uint32_t i;
913 bool hw_on = true;
914
915 if (!dmub->hw_init)
916 return DMUB_STATUS_INVALID;
917
918 for (i = 0; i <= timeout_us; i += 100) {
919 union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);
920
921 if (dmub->hw_funcs.is_hw_powered_up)
922 hw_on = dmub->hw_funcs.is_hw_powered_up(dmub);
923
924 if (status.bits.dal_fw && status.bits.mailbox_rdy && hw_on)
925 return DMUB_STATUS_OK;
926
927 udelay(100);
928 }
929
930 return DMUB_STATUS_TIMEOUT;
931 }
932
dmub_srv_update_reg_inbox0_status(struct dmub_srv * dmub)933 static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
934 {
935 if (dmub->reg_inbox0.is_pending) {
936 dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
937 !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
938
939 if (!dmub->reg_inbox0.is_pending) {
940 /* ack the rsp interrupt */
941 if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
942 dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
943
944 /* only update the reported count if commands aren't being batched */
945 if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
946 dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
947 }
948 }
949 }
950 }
951
/* Wait until no submitted command (inbox1 or reg inbox0) remains pending,
 * or timeout_us elapses. Does not modify any inbox state. */
enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
		uint32_t timeout_us)
{
	uint32_t i;
	const uint32_t polling_interval_us = 1;
	/* Local snapshots: the submission counters are latched once here,
	 * while ring pointers / pending flag are refreshed from HW below. */
	struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
	struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
	/* volatile views force a fresh read of the reported counters on
	 * every loop iteration in case they advance concurrently. */
	const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
	const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;

	if (!dmub->hw_init ||
	    !dmub->hw_funcs.get_inbox1_wptr)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; i += polling_interval_us) {
		/* Refresh inbox1 pointers straight from the HW registers. */
		scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
		scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);

		/* reg inbox0 stays pending until the rsp interrupt fires. */
		scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
				dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
				!dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);

		/* Read pointer past capacity means corrupted HW state. */
		if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
			return DMUB_STATUS_HW_FAILURE;

		/* check current HW state first, but use command submission vs reported as a fallback */
		if ((dmub_rb_empty(&scratch_inbox1.rb) ||
		     inbox1->num_reported >= scratch_inbox1.num_submitted) &&
		    (!scratch_reg_inbox0.is_pending ||
		     reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
			return DMUB_STATUS_OK;

		udelay(polling_interval_us);
	}

	return DMUB_STATUS_TIMEOUT;
}
989
dmub_srv_wait_for_idle(struct dmub_srv * dmub,uint32_t timeout_us)990 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
991 uint32_t timeout_us)
992 {
993 enum dmub_status status;
994 uint32_t i;
995 const uint32_t polling_interval_us = 1;
996
997 if (!dmub->hw_init)
998 return DMUB_STATUS_INVALID;
999
1000 for (i = 0; i < timeout_us; i += polling_interval_us) {
1001 status = dmub_srv_update_inbox_status(dmub);
1002
1003 if (status != DMUB_STATUS_OK)
1004 return status;
1005
1006 /* check for idle */
1007 if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
1008 return DMUB_STATUS_OK;
1009
1010 udelay(polling_interval_us);
1011 }
1012
1013 return DMUB_STATUS_TIMEOUT;
1014 }
1015
1016 enum dmub_status
dmub_srv_send_gpint_command(struct dmub_srv * dmub,enum dmub_gpint_command command_code,uint16_t param,uint32_t timeout_us)1017 dmub_srv_send_gpint_command(struct dmub_srv *dmub,
1018 enum dmub_gpint_command command_code,
1019 uint16_t param, uint32_t timeout_us)
1020 {
1021 union dmub_gpint_data_register reg;
1022 uint32_t i;
1023
1024 if (!dmub->sw_init)
1025 return DMUB_STATUS_INVALID;
1026
1027 if (!dmub->hw_funcs.set_gpint)
1028 return DMUB_STATUS_INVALID;
1029
1030 if (!dmub->hw_funcs.is_gpint_acked)
1031 return DMUB_STATUS_INVALID;
1032
1033 reg.bits.status = 1;
1034 reg.bits.command_code = command_code;
1035 reg.bits.param = param;
1036
1037 dmub->hw_funcs.set_gpint(dmub, reg);
1038
1039 for (i = 0; i < timeout_us; ++i) {
1040 udelay(1);
1041
1042 if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
1043 return DMUB_STATUS_OK;
1044 }
1045
1046 return DMUB_STATUS_TIMEOUT;
1047 }
1048
dmub_srv_get_gpint_response(struct dmub_srv * dmub,uint32_t * response)1049 enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
1050 uint32_t *response)
1051 {
1052 *response = 0;
1053
1054 if (!dmub->sw_init)
1055 return DMUB_STATUS_INVALID;
1056
1057 if (!dmub->hw_funcs.get_gpint_response)
1058 return DMUB_STATUS_INVALID;
1059
1060 *response = dmub->hw_funcs.get_gpint_response(dmub);
1061
1062 return DMUB_STATUS_OK;
1063 }
1064
dmub_srv_get_gpint_dataout(struct dmub_srv * dmub,uint32_t * dataout)1065 enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub,
1066 uint32_t *dataout)
1067 {
1068 *dataout = 0;
1069
1070 if (!dmub->sw_init)
1071 return DMUB_STATUS_INVALID;
1072
1073 if (!dmub->hw_funcs.get_gpint_dataout)
1074 return DMUB_STATUS_INVALID;
1075
1076 *dataout = dmub->hw_funcs.get_gpint_dataout(dmub);
1077
1078 return DMUB_STATUS_OK;
1079 }
1080
dmub_srv_get_fw_boot_status(struct dmub_srv * dmub,union dmub_fw_boot_status * status)1081 enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
1082 union dmub_fw_boot_status *status)
1083 {
1084 status->all = 0;
1085
1086 if (!dmub->sw_init)
1087 return DMUB_STATUS_INVALID;
1088
1089 if (dmub->hw_funcs.get_fw_status)
1090 *status = dmub->hw_funcs.get_fw_status(dmub);
1091
1092 return DMUB_STATUS_OK;
1093 }
1094
dmub_srv_get_fw_boot_option(struct dmub_srv * dmub,union dmub_fw_boot_options * option)1095 enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
1096 union dmub_fw_boot_options *option)
1097 {
1098 option->all = 0;
1099
1100 if (!dmub->sw_init)
1101 return DMUB_STATUS_INVALID;
1102
1103 if (dmub->hw_funcs.get_fw_boot_option)
1104 *option = dmub->hw_funcs.get_fw_boot_option(dmub);
1105
1106 return DMUB_STATUS_OK;
1107 }
1108
dmub_srv_set_skip_panel_power_sequence(struct dmub_srv * dmub,bool skip)1109 enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
1110 bool skip)
1111 {
1112 if (!dmub->sw_init)
1113 return DMUB_STATUS_INVALID;
1114
1115 if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
1116 dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip);
1117
1118 return DMUB_STATUS_OK;
1119 }
1120
dmub_rb_out_trace_buffer_front(struct dmub_rb * rb,void * entry)1121 static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
1122 void *entry)
1123 {
1124 const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
1125 uint64_t *dst = (uint64_t *)entry;
1126 uint8_t i;
1127 uint8_t loop_count;
1128
1129 if (rb->rptr == rb->wrpt)
1130 return false;
1131
1132 loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
1133 // copying data
1134 for (i = 0; i < loop_count; i++)
1135 *dst++ = *src++;
1136
1137 rb->rptr += sizeof(struct dmcub_trace_buf_entry);
1138
1139 rb->rptr %= rb->capacity;
1140
1141 return true;
1142 }
1143
/* Pop the oldest trace-buffer entry from the outbox0 ring into *entry.
 * Returns false when no entry is available. */
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
{
	/* Latch the current HW write pointer before reading. */
	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);

	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
1150
dmub_srv_get_diagnostic_data(struct dmub_srv * dmub)1151 bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub)
1152 {
1153 if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
1154 return false;
1155 dmub->hw_funcs.get_diagnostic_data(dmub);
1156 return true;
1157 }
1158
dmub_srv_should_detect(struct dmub_srv * dmub)1159 bool dmub_srv_should_detect(struct dmub_srv *dmub)
1160 {
1161 if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
1162 return false;
1163
1164 return dmub->hw_funcs.should_detect(dmub);
1165 }
1166
dmub_srv_clear_inbox0_ack(struct dmub_srv * dmub)1167 enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
1168 {
1169 if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
1170 return DMUB_STATUS_INVALID;
1171
1172 dmub->hw_funcs.clear_inbox0_ack_register(dmub);
1173 return DMUB_STATUS_OK;
1174 }
1175
dmub_srv_wait_for_inbox0_ack(struct dmub_srv * dmub,uint32_t timeout_us)1176 enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
1177 {
1178 uint32_t i = 0;
1179 uint32_t ack = 0;
1180
1181 if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
1182 return DMUB_STATUS_INVALID;
1183
1184 for (i = 0; i <= timeout_us; i++) {
1185 ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
1186 if (ack)
1187 return DMUB_STATUS_OK;
1188 udelay(1);
1189 }
1190 return DMUB_STATUS_TIMEOUT;
1191 }
1192
dmub_srv_send_inbox0_cmd(struct dmub_srv * dmub,union dmub_inbox0_data_register data)1193 enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
1194 union dmub_inbox0_data_register data)
1195 {
1196 if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
1197 return DMUB_STATUS_INVALID;
1198
1199 dmub->hw_funcs.send_inbox0_cmd(dmub, data);
1200 return DMUB_STATUS_OK;
1201 }
1202
dmub_srv_subvp_save_surf_addr(struct dmub_srv * dmub,const struct dc_plane_address * addr,uint8_t subvp_index)1203 void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index)
1204 {
1205 if (dmub->hw_funcs.subvp_save_surf_addr) {
1206 dmub->hw_funcs.subvp_save_surf_addr(dmub,
1207 addr,
1208 subvp_index);
1209 }
1210 }
1211
dmub_srv_set_power_state(struct dmub_srv * dmub,enum dmub_srv_power_state_type dmub_srv_power_state)1212 void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
1213 {
1214 if (!dmub || !dmub->hw_init)
1215 return;
1216
1217 dmub->power_state = dmub_srv_power_state;
1218 }
1219
/* Submit one command through the register-based inbox0 interface.
 * Fails with QUEUE_FULL when the max number of commands are already
 * outstanding. Marks the inbox pending until the response interrupt. */
enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
	uint32_t num_pending = 0;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->power_state != DMUB_POWER_STATE_D0)
		return DMUB_STATUS_POWER_STATE_D3;

	if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
	    !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
		return DMUB_STATUS_INVALID;

	/* Outstanding = submitted - reported, accounting for the uint32_t
	 * counters wrapping independently of each other. */
	if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
		num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
	else
		/* num_submitted wrapped */
		num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
			(dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);

	if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
		return DMUB_STATUS_QUEUE_FULL;

	/* clear last rsp ack and send message */
	dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
	dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);

	dmub->reg_inbox0.num_submitted++;
	dmub->reg_inbox0.is_pending = true;
	/* multi_cmd_pending marks this command as part of a batch, which
	 * defers the reported-count update (see update_reg_inbox0_status). */
	dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;

	return DMUB_STATUS_OK;
}
1254
dmub_srv_cmd_get_response(struct dmub_srv * dmub,union dmub_rb_cmd * cmd_rsp)1255 void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
1256 union dmub_rb_cmd *cmd_rsp)
1257 {
1258 if (dmub) {
1259 if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
1260 dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
1261 dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
1262 } else {
1263 dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
1264 }
1265 }
1266 }
1267
dmub_srv_sync_reg_inbox0(struct dmub_srv * dmub)1268 static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
1269 {
1270 if (!dmub || !dmub->sw_init)
1271 return DMUB_STATUS_INVALID;
1272
1273 dmub->reg_inbox0.is_pending = 0;
1274 dmub->reg_inbox0.is_multi_pending = 0;
1275
1276 return DMUB_STATUS_OK;
1277 }
1278
dmub_srv_sync_inbox1(struct dmub_srv * dmub)1279 static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
1280 {
1281 if (!dmub->sw_init)
1282 return DMUB_STATUS_INVALID;
1283
1284 if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
1285 uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
1286 uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
1287
1288 if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
1289 return DMUB_STATUS_HW_FAILURE;
1290 } else {
1291 dmub->inbox1.rb.rptr = rptr;
1292 dmub->inbox1.rb.wrpt = wptr;
1293 dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
1294 }
1295 }
1296
1297 return DMUB_STATUS_OK;
1298 }
1299
dmub_srv_sync_inboxes(struct dmub_srv * dmub)1300 enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
1301 {
1302 enum dmub_status status;
1303
1304 status = dmub_srv_sync_reg_inbox0(dmub);
1305 if (status != DMUB_STATUS_OK)
1306 return status;
1307
1308 status = dmub_srv_sync_inbox1(dmub);
1309 if (status != DMUB_STATUS_OK)
1310 return status;
1311
1312 return DMUB_STATUS_OK;
1313 }
1314
dmub_srv_wait_for_inbox_free(struct dmub_srv * dmub,uint32_t timeout_us,uint32_t num_free_required)1315 enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
1316 uint32_t timeout_us,
1317 uint32_t num_free_required)
1318 {
1319 enum dmub_status status;
1320 uint32_t i;
1321 const uint32_t polling_interval_us = 1;
1322
1323 if (!dmub->hw_init)
1324 return DMUB_STATUS_INVALID;
1325
1326 for (i = 0; i < timeout_us; i += polling_interval_us) {
1327 status = dmub_srv_update_inbox_status(dmub);
1328
1329 if (status != DMUB_STATUS_OK)
1330 return status;
1331
1332 /* check for space in inbox1 */
1333 if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
1334 return DMUB_STATUS_OK;
1335
1336 udelay(polling_interval_us);
1337 }
1338
1339 return DMUB_STATUS_TIMEOUT;
1340 }
1341
/* Refresh SW bookkeeping for both inboxes: advance inbox1's reported
 * command count from the HW read pointer, then update the register
 * inbox0 pending state. */
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
{
	uint32_t rptr;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->power_state != DMUB_POWER_STATE_D0)
		return DMUB_STATUS_POWER_STATE_D3;

	/* update inbox1 state */
	rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);

	/* A read pointer past capacity indicates corrupted HW state. */
	if (rptr > dmub->inbox1.rb.capacity)
		return DMUB_STATUS_HW_FAILURE;

	/* Commands completed since last update = distance the HW read
	 * pointer advanced, divided by the fixed command size. */
	if (dmub->inbox1.rb.rptr > rptr) {
		/* rb wrapped */
		dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
	} else {
		dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
	}
	dmub->inbox1.rb.rptr = rptr;

	/* update reg_inbox0 */
	dmub_srv_update_reg_inbox0_status(dmub);

	return DMUB_STATUS_OK;
}
1371