/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __AMDGPU_SMU_H__
#define __AMDGPU_SMU_H__

#include <linux/acpi_amd_wbrf.h>
#include <linux/units.h>

#include "amdgpu.h"
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
#include "dm_pp_smu.h"
#include "smu_types.h"
#include "linux/firmware.h"

#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
#define SMU_FW_NAME_LEN 0x24

#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
#define SMU_CUSTOM_FAN_SPEED_RPM     (1 << 1)
#define SMU_CUSTOM_FAN_SPEED_PWM     (1 << 2)

// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT 0
#define SMU_THROTTLER_PPT1_BIT 1
#define SMU_THROTTLER_PPT2_BIT 2
#define SMU_THROTTLER_PPT3_BIT 3
#define SMU_THROTTLER_SPL_BIT 4
#define SMU_THROTTLER_FPPT_BIT 5
#define SMU_THROTTLER_SPPT_BIT 6
#define SMU_THROTTLER_SPPT_APU_BIT 7

// Current Throttlers
#define SMU_THROTTLER_TDC_GFX_BIT 16
#define SMU_THROTTLER_TDC_SOC_BIT 17
#define SMU_THROTTLER_TDC_MEM_BIT 18
#define SMU_THROTTLER_TDC_VDD_BIT 19
#define SMU_THROTTLER_TDC_CVIP_BIT 20
#define SMU_THROTTLER_EDC_CPU_BIT 21
#define SMU_THROTTLER_EDC_GFX_BIT 22
#define SMU_THROTTLER_APCC_BIT 23

// Temperature
#define SMU_THROTTLER_TEMP_GPU_BIT 32
#define SMU_THROTTLER_TEMP_CORE_BIT 33
#define SMU_THROTTLER_TEMP_MEM_BIT 34
#define SMU_THROTTLER_TEMP_EDGE_BIT 35
#define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36
#define SMU_THROTTLER_TEMP_SOC_BIT 37
#define SMU_THROTTLER_TEMP_VR_GFX_BIT 38
#define SMU_THROTTLER_TEMP_VR_SOC_BIT 39
#define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40
#define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41
#define SMU_THROTTLER_TEMP_LIQUID0_BIT 42
#define SMU_THROTTLER_TEMP_LIQUID1_BIT 43
#define SMU_THROTTLER_VRHOT0_BIT 44
#define SMU_THROTTLER_VRHOT1_BIT 45
#define SMU_THROTTLER_PROCHOT_CPU_BIT 46
#define SMU_THROTTLER_PROCHOT_GFX_BIT 47

// Other
#define SMU_THROTTLER_PPM_BIT 56
#define SMU_THROTTLER_FIT_BIT 57
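
/*
 * Example (illustrative sketch, not part of this header): the defines above
 * are bit positions within a 64-bit, ASIC-agnostic throttler status word.
 * A minimal way to build and test such a mask, assuming hypothetical local
 * variables:
 *
 *	u64 indep_status = 0;
 *
 *	indep_status |= BIT_ULL(SMU_THROTTLER_PPT0_BIT);
 *	indep_status |= BIT_ULL(SMU_THROTTLER_TEMP_HOTSPOT_BIT);
 *
 *	if (indep_status & BIT_ULL(SMU_THROTTLER_TEMP_HOTSPOT_BIT))
 *		pr_warn("throttled by hotspot temperature\n");
 */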

struct smu_hw_power_state {
	unsigned int magic;
};

struct smu_power_state;

enum smu_state_ui_label {
	SMU_STATE_UI_LABEL_NONE,
	SMU_STATE_UI_LABEL_BATTERY,
	SMU_STATE_UI_TABEL_MIDDLE_LOW,
	SMU_STATE_UI_LABEL_BALLANCED,
	SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
	SMU_STATE_UI_LABEL_PERFORMANCE,
	SMU_STATE_UI_LABEL_BACO,
};

enum smu_state_classification_flag {
	SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
	SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
	SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
	SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
	SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
	SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
	SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
	SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
	SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
	SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
	SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
	SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
	SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
	SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
};

struct smu_state_classification_block {
	enum smu_state_ui_label ui_label;
	enum smu_state_classification_flag flags;
	int bios_index;
	bool temporary_state;
	bool to_be_deleted;
};

struct smu_state_pcie_block {
	unsigned int lanes;
};

enum smu_refreshrate_source {
	SMU_REFRESHRATE_SOURCE_EDID,
	SMU_REFRESHRATE_SOURCE_EXPLICIT
};

struct smu_state_display_block {
	bool disable_frame_modulation;
	bool limit_refreshrate;
	enum smu_refreshrate_source refreshrate_source;
	int explicit_refreshrate;
	int edid_refreshrate_index;
	bool enable_vari_bright;
};

struct smu_state_memory_block {
	bool dll_off;
	uint8_t m3arb;
	uint8_t unused[3];
};

struct smu_state_software_algorithm_block {
	bool disable_load_balancing;
	bool enable_sleep_for_timestamps;
};

struct smu_temperature_range {
	int min;
	int max;
	int edge_emergency_max;
	int hotspot_min;
	int hotspot_crit_max;
	int hotspot_emergency_max;
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int software_shutdown_temp;
	int software_shutdown_temp_offset;
};

struct smu_state_validation_block {
	bool single_display_only;
	bool disallow_on_dc;
	uint8_t supported_power_levels;
};

struct smu_uvd_clocks {
	uint32_t vclk;
	uint32_t dclk;
};

/**
 * Structure to hold a SMU Power State.
 */
struct smu_power_state {
	uint32_t id;
	struct list_head ordered_list;
	struct list_head all_states_list;

	struct smu_state_classification_block classification;
	struct smu_state_validation_block validation;
	struct smu_state_pcie_block pcie;
	struct smu_state_display_block display;
	struct smu_state_memory_block memory;
	struct smu_state_software_algorithm_block software;
	struct smu_uvd_clocks uvd_clocks;
	struct smu_hw_power_state hardware;
};

enum smu_power_src_type {
	SMU_POWER_SOURCE_AC,
	SMU_POWER_SOURCE_DC,
	SMU_POWER_SOURCE_COUNT,
};

enum smu_ppt_limit_type {
	SMU_DEFAULT_PPT_LIMIT = 0,
	SMU_FAST_PPT_LIMIT,
};

enum smu_ppt_limit_level {
	SMU_PPT_LIMIT_MIN = -1,
	SMU_PPT_LIMIT_CURRENT,
	SMU_PPT_LIMIT_DEFAULT,
	SMU_PPT_LIMIT_MAX,
};

enum smu_memory_pool_size {
	SMU_MEMORY_POOL_SIZE_ZERO = 0,
	SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
	SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
	SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
	SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
};

struct smu_user_dpm_profile {
	uint32_t fan_mode;
	uint32_t power_limit;
	uint32_t fan_speed_pwm;
	uint32_t fan_speed_rpm;
	uint32_t flags;
	uint32_t user_od;

	/* user clock state information */
	uint32_t clk_mask[SMU_CLK_COUNT];
	uint32_t clk_dependency;
};

#define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
	do {						\
		tables[table_id].size = s;		\
		tables[table_id].align = a;		\
		tables[table_id].domain = d;		\
	} while (0)
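
/*
 * Example (illustrative sketch, not part of this header): SMU_TABLE_INIT()
 * is typically used by an ASIC backend to describe its tables before the
 * backing buffers are allocated. The table struct type (Watermarks_t) below
 * is only a placeholder for whatever the ASIC-specific PMFW headers define.
 *
 *	struct smu_table *tables = smu->smu_table.tables;
 *
 *	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
 *		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 */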

struct smu_table_cache {
	void *buffer;
	size_t size;
	/* interval in ms */
	uint32_t interval;
	unsigned long last_cache_time;
};

struct smu_table {
	uint64_t size;
	uint32_t align;
	uint8_t domain;
	uint64_t mc_address;
	void *cpu_addr;
	struct amdgpu_bo *bo;
	uint32_t version;
	struct smu_table_cache cache;
};

enum smu_perf_level_designation {
	PERF_LEVEL_ACTIVITY,
	PERF_LEVEL_POWER_CONTAINMENT,
};

struct smu_performance_level {
	uint32_t core_clock;
	uint32_t memory_clock;
	uint32_t vddc;
	uint32_t vddci;
	uint32_t non_local_mem_freq;
	uint32_t non_local_mem_width;
};

struct smu_clock_info {
	uint32_t min_mem_clk;
	uint32_t max_mem_clk;
	uint32_t min_eng_clk;
	uint32_t max_eng_clk;
	uint32_t min_bus_bandwidth;
	uint32_t max_bus_bandwidth;
};

struct smu_bios_boot_up_values {
	uint32_t revision;
	uint32_t gfxclk;
	uint32_t uclk;
	uint32_t socclk;
	uint32_t dcefclk;
	uint32_t eclk;
	uint32_t vclk;
	uint32_t dclk;
	uint16_t vddc;
	uint16_t vddci;
	uint16_t mvddc;
	uint16_t vdd_gfx;
	uint8_t cooling_id;
	uint32_t pp_table_id;
	uint32_t format_revision;
	uint32_t content_revision;
	uint32_t fclk;
	uint32_t lclk;
	uint32_t firmware_caps;
};

enum smu_table_id {
	SMU_TABLE_PPTABLE = 0,
	SMU_TABLE_WATERMARKS,
	SMU_TABLE_CUSTOM_DPM,
	SMU_TABLE_DPMCLOCKS,
	SMU_TABLE_AVFS,
	SMU_TABLE_AVFS_PSM_DEBUG,
	SMU_TABLE_AVFS_FUSE_OVERRIDE,
	SMU_TABLE_PMSTATUSLOG,
	SMU_TABLE_SMU_METRICS,
	SMU_TABLE_DRIVER_SMU_CONFIG,
	SMU_TABLE_ACTIVITY_MONITOR_COEFF,
	SMU_TABLE_OVERDRIVE,
	SMU_TABLE_I2C_COMMANDS,
	SMU_TABLE_PACE,
	SMU_TABLE_ECCINFO,
	SMU_TABLE_COMBO_PPTABLE,
	SMU_TABLE_WIFIBAND,
	SMU_TABLE_GPUBOARD_TEMP_METRICS,
	SMU_TABLE_BASEBOARD_TEMP_METRICS,
	SMU_TABLE_PMFW_SYSTEM_METRICS,
	SMU_TABLE_COUNT,
};

struct smu_table_context {
	void *power_play_table;
	uint32_t power_play_table_size;
	void *hardcode_pptable;
	unsigned long metrics_time;
	void *metrics_table;
	void *clocks_table;
	void *watermarks_table;

	void *max_sustainable_clocks;
	struct smu_bios_boot_up_values boot_values;
	void *driver_pptable;
	void *combo_pptable;
	void *ecc_table;
	void *driver_smu_config_table;
	struct smu_table tables[SMU_TABLE_COUNT];
	/*
	 * The driver table is just a staging buffer for
	 * uploading/downloading content from the SMU.
	 *
	 * The table_id for SMU_MSG_TransferTableSmu2Dram/
	 * SMU_MSG_TransferTableDram2Smu tells the SMU
	 * which content the driver is interested in.
	 */
	struct smu_table driver_table;
	struct smu_table memory_pool;
	struct smu_table dummy_read_1_table;
	uint8_t thermal_controller_type;

	void *overdrive_table;
	void *boot_overdrive_table;
	void *user_overdrive_table;

	uint32_t gpu_metrics_table_size;
	void *gpu_metrics_table;
};

struct smu_context;
struct smu_dpm_policy;

struct smu_dpm_policy_desc {
	const char *name;
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};

struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	unsigned long level_mask;
	int current_level;
	int (*set_policy)(struct smu_context *ctxt, int level);
};

struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};
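
/*
 * Example (illustrative sketch, not part of this header): a policy context
 * holds one smu_dpm_policy per pp_pm_policy type, with policy_mask marking
 * which of them the ASIC actually implements. Iterating only the supported
 * policies could look like this (the local variable names are hypothetical):
 *
 *	struct smu_dpm_policy_ctxt *ctxt = smu->smu_dpm.dpm_policies;
 *	unsigned long i;
 *
 *	for_each_set_bit(i, &ctxt->policy_mask, PP_PM_POLICY_NUM) {
 *		struct smu_dpm_policy *policy = &ctxt->policies[i];
 *
 *		pr_debug("%s: current level %d\n", policy->desc->name,
 *			 policy->current_level);
 *	}
 */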

struct smu_dpm_context {
	uint32_t dpm_context_size;
	void *dpm_context;
	void *golden_dpm_context;
	enum amd_dpm_forced_level dpm_level;
	enum amd_dpm_forced_level saved_dpm_level;
	enum amd_dpm_forced_level requested_dpm_level;
	struct smu_power_state *dpm_request_power_state;
	struct smu_power_state *dpm_current_power_state;
	struct mclock_latency_table *mclk_latency_table;
	struct smu_dpm_policy_ctxt *dpm_policies;
};

struct smu_temp_context {
	const struct smu_temp_funcs *temp_funcs;
};

struct smu_power_gate {
	bool uvd_gated;
	bool vce_gated;
	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
	atomic_t jpeg_gated;
	atomic_t vpe_gated;
	atomic_t isp_gated;
	atomic_t umsch_mm_gated;
};

struct smu_power_context {
	void *power_context;
	uint32_t power_context_size;
	struct smu_power_gate power_gate;
};

#define SMU_FEATURE_MAX	(64)
struct smu_feature {
	uint32_t feature_num;
	DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
	DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
};
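
/*
 * Example (illustrative sketch, not part of this header): &supported and
 * &allowed are standard kernel bitmaps indexed by the SMU_FEATURE_*_BIT
 * values from smu_types.h, so the usual bitmap helpers apply:
 *
 *	struct smu_feature *feature = &smu->smu_feature;
 *
 *	set_bit(SMU_FEATURE_DPM_GFXCLK_BIT, feature->allowed);
 *	if (test_bit(SMU_FEATURE_DPM_GFXCLK_BIT, feature->supported))
 *		pr_debug("GFXCLK DPM is supported\n");
 */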

struct smu_clocks {
	uint32_t engine_clock;
	uint32_t memory_clock;
	uint32_t bus_bandwidth;
	uint32_t engine_clock_in_sr;
	uint32_t dcef_clock;
	uint32_t dcef_clock_in_sr;
};

#define MAX_REGULAR_DPM_NUM 16
struct mclk_latency_entries {
	uint32_t frequency;
	uint32_t latency;
};
struct mclock_latency_table {
	uint32_t count;
	struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};

enum smu_reset_mode {
	SMU_RESET_MODE_0,
	SMU_RESET_MODE_1,
	SMU_RESET_MODE_2,
	SMU_RESET_MODE_3,
	SMU_RESET_MODE_4,
};

enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
	SMU_BACO_STATE_NONE,
};

struct smu_baco_context {
	uint32_t state;
	bool platform_support;
	bool maco_support;
};

struct smu_freq_info {
	uint32_t min;
	uint32_t max;
	uint32_t freq_level;
};

struct pstates_clk_freq {
	uint32_t min;
	uint32_t standard;
	uint32_t peak;
	struct smu_freq_info custom;
	struct smu_freq_info curr;
};

struct smu_umd_pstate_table {
	struct pstates_clk_freq gfxclk_pstate;
	struct pstates_clk_freq socclk_pstate;
	struct pstates_clk_freq uclk_pstate;
	struct pstates_clk_freq vclk_pstate;
	struct pstates_clk_freq dclk_pstate;
	struct pstates_clk_freq fclk_pstate;
};

struct cmn2asic_msg_mapping {
	int valid_mapping;
	int map_to;
	uint32_t flags;
};

struct cmn2asic_mapping {
	int valid_mapping;
	int map_to;
};

struct stb_context {
	uint32_t stb_buf_size;
	bool enabled;
	spinlock_t lock;
};

enum smu_fw_status {
	SMU_FW_INIT = 0,
	SMU_FW_RUNTIME,
	SMU_FW_HANG,
};

#define WORKLOAD_POLICY_MAX 7

/*
 * Configure wbrf event handling pace as there can be only one
 * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
 */
#define SMU_WBRF_EVENT_HANDLING_PACE 10

enum smu_feature_cap_id {
	SMU_FEATURE_CAP_ID__LINK_RESET = 0,
	SMU_FEATURE_CAP_ID__SDMA_RESET,
	SMU_FEATURE_CAP_ID__VCN_RESET,
	SMU_FEATURE_CAP_ID__COUNT,
};

struct smu_feature_cap {
	DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
};

struct smu_context {
	struct amdgpu_device *adev;
	struct amdgpu_irq_src irq_source;

	const struct pptable_funcs *ppt_funcs;
	const struct cmn2asic_msg_mapping *message_map;
	const struct cmn2asic_mapping *clock_map;
	const struct cmn2asic_mapping *feature_map;
	const struct cmn2asic_mapping *table_map;
	const struct cmn2asic_mapping *pwr_src_map;
	const struct cmn2asic_mapping *workload_map;
	struct mutex message_lock;
	uint64_t pool_size;

	struct smu_table_context smu_table;
	struct smu_dpm_context smu_dpm;
	struct smu_power_context smu_power;
	struct smu_temp_context smu_temp;
	struct smu_feature smu_feature;
	struct amd_pp_display_configuration *display_config;
	struct smu_baco_context smu_baco;
	struct smu_temperature_range thermal_range;
	struct smu_feature_cap fea_cap;
	void *od_settings;

	struct smu_umd_pstate_table pstate_table;
	uint32_t pstate_sclk;
	uint32_t pstate_mclk;

	bool od_enabled;
	uint32_t current_power_limit;
	uint32_t default_power_limit;
	uint32_t max_power_limit;
	uint32_t min_power_limit;

	/* soft pptable */
	uint32_t ppt_offset_bytes;
	uint32_t ppt_size_bytes;
	uint8_t *ppt_start_addr;

	bool support_power_containment;
	bool disable_watermark;

#define WATERMARKS_EXIST	(1 << 0)
#define WATERMARKS_LOADED	(1 << 1)
	uint32_t watermarks_bitmap;
	uint32_t hard_min_uclk_req_from_dal;
	bool disable_uclk_switch;

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
	/* backend specific custom workload settings */
	long *custom_profile_params;
	bool pm_enabled;
	bool is_apu;

	uint32_t smc_driver_if_version;
	uint32_t smc_fw_if_version;
	uint32_t smc_fw_version;
	uint32_t smc_fw_caps;
	uint8_t smc_fw_state;

	bool uploading_custom_pp_table;
	bool dc_controlled_by_gpio;

	struct work_struct throttling_logging_work;
	atomic64_t throttle_int_counter;
	struct work_struct interrupt_work;

	unsigned fan_max_rpm;
	unsigned manual_fan_speed_pwm;

	uint32_t gfx_default_hard_min_freq;
	uint32_t gfx_default_soft_max_freq;
	uint32_t gfx_actual_hard_min_freq;
	uint32_t gfx_actual_soft_max_freq;

	/* APU only */
	uint32_t cpu_default_soft_min_freq;
	uint32_t cpu_default_soft_max_freq;
	uint32_t cpu_actual_soft_min_freq;
	uint32_t cpu_actual_soft_max_freq;
	uint32_t cpu_core_id_select;
	uint16_t cpu_core_num;

	struct smu_user_dpm_profile user_dpm_profile;

	struct stb_context stb_context;

	struct firmware pptable_firmware;

	u32 param_reg;
	u32 msg_reg;
	u32 resp_reg;

	u32 debug_param_reg;
	u32 debug_msg_reg;
	u32 debug_resp_reg;

	struct delayed_work swctf_delayed_work;

	/* data structures for wbrf feature support */
	bool wbrf_supported;
	struct notifier_block wbrf_notifier;
	struct delayed_work wbrf_delayed_work;
};

struct i2c_adapter;

/**
 * struct smu_temp_funcs - Callbacks used to get temperature data.
 */
struct smu_temp_funcs {
	/**
	 * @get_temp_metrics: Get a copy of the temperature metrics table for
	 * the given board component from the SMU.
	 * @type: Temperature metrics type (baseboard/gpuboard)
	 * Return: Size of &table
	 */
	ssize_t (*get_temp_metrics)(struct smu_context *smu,
				    enum smu_temp_metric_type type, void *table);

	/**
	 * @temp_metrics_is_supported: Check whether the given temperature
	 * metrics type (baseboard/gpuboard) is supported.
	 * Return: true if supported, false otherwise
	 */
	bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);

};
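
/*
 * Example (illustrative sketch, not part of this header): like the pptable
 * callbacks below, the temperature callbacks are optional, so callers are
 * expected to probe them before use. The wrapper below is hypothetical.
 *
 *	static bool smu_example_baseboard_temp_supported(struct smu_context *smu)
 *	{
 *		const struct smu_temp_funcs *funcs = smu->smu_temp.temp_funcs;
 *
 *		return funcs && funcs->temp_metrics_is_supported &&
 *		       funcs->temp_metrics_is_supported(smu, SMU_TEMP_METRIC_BASEBOARD);
 *	}
 */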

/**
 * struct pptable_funcs - Callbacks used to interact with the SMU.
 */
struct pptable_funcs {
	/**
	 * @run_btc: Calibrate voltage/frequency curve to fit the system's
	 * power delivery and voltage margins. Required for adaptive
	 * voltage frequency scaling (AVFS).
	 */
	int (*run_btc)(struct smu_context *smu);

	/**
	 * @get_allowed_feature_mask: Get allowed feature mask.
	 * &feature_mask: Array to store feature mask.
	 * &num: Elements in &feature_mask.
	 */
	int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);

	/**
	 * @get_current_power_state: Get the current power state.
	 *
	 * Return: Current power state on success, negative errno on failure.
	 */
	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);

	/**
	 * @set_default_dpm_table: Retrieve the default overdrive settings from
	 * the SMU.
	 */
	int (*set_default_dpm_table)(struct smu_context *smu);

	int (*set_power_state)(struct smu_context *smu);

	/**
	 * @populate_umd_state_clk: Populate the UMD power state table with
	 * defaults.
	 */
	int (*populate_umd_state_clk)(struct smu_context *smu);

	/**
	 * @print_clk_levels: Print DPM clock levels for a clock domain
	 * to buffer. The current level is marked with an asterisk.
	 *
	 * Used for sysfs interfaces.
	 * Return: Number of characters written to the buffer.
	 */
	int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);

	/**
	 * @emit_clk_levels: Print DPM clock levels for a clock domain
	 * to buffer using sysfs_emit_at. The current level is marked with an
	 * asterisk.
	 *
	 * Used for sysfs interfaces.
	 * &buf: sysfs buffer
	 * &offset: offset within buffer to start printing, which is updated by the
	 * function.
	 *
	 * Return: 0 on success or a negative error code on failure.
	 */
	int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);

	/**
	 * @force_clk_levels: Set a range of allowed DPM levels for a clock
	 * domain.
	 * &clk_type: Clock domain.
	 * &mask: Range of allowed DPM levels.
	 */
	int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);

	/**
	 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
	 * &type: Type of edit.
	 * &input: Edit parameters.
	 * &size: Size of &input.
	 */
	int (*od_edit_dpm_table)(struct smu_context *smu,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size);

	/**
	 * @restore_user_od_settings: Restore the user customized
	 * OD settings on S3/S4/Runpm resume.
	 */
	int (*restore_user_od_settings)(struct smu_context *smu);

	/**
	 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
	 * domain.
	 */
	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
					      enum smu_clk_type clk_type,
					      struct
					      pp_clock_levels_with_latency
					      *clocks);
	/**
	 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
	 * domain.
	 */
	int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
					      enum amd_pp_clock_type type,
					      struct
					      pp_clock_levels_with_voltage
					      *clocks);

	/**
	 * @get_power_profile_mode: Print all power profile modes to
	 * buffer. The current mode is marked with an asterisk.
	 */
	int (*get_power_profile_mode)(struct smu_context *smu, char *buf);

	/**
	 * @set_power_profile_mode: Set a power profile mode. Also used to
	 * create/set custom power profile modes.
	 * &input: Power profile mode parameters.
	 * &workload_mask: mask of workloads to enable
	 * &custom_params: custom profile parameters
	 * &custom_params_max_idx: max valid idx into custom_params
	 */
	int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
				      long *custom_params, u32 custom_params_max_idx);

	/**
	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
	 * management.
	 */
	int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);

	/**
	 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
	 * management.
	 */
	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);

	/**
	 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
	 */
	int (*set_gfx_power_up_by_imu)(struct smu_context *smu);

	/**
	 * @read_sensor: Read data from a sensor.
	 * &sensor: Sensor to read data from.
	 * &data: Sensor reading.
	 * &size: Size of &data.
	 */
	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size);

	/**
	 * @get_apu_thermal_limit: Get the APU core limit from the SMU.
	 * &limit: current limit temperature in millidegrees Celsius
	 */
	int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);

	/**
	 * @set_apu_thermal_limit: Update all controllers with a new limit.
	 * &limit: limit temperature to be set, in millidegrees Celsius
	 */
	int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);

	/**
	 * @pre_display_config_changed: Prepare GPU for a display configuration
	 * change.
	 *
	 * Disable display tracking and pin memory clock speed to maximum. Used
	 * in display component synchronization.
	 */
	int (*pre_display_config_changed)(struct smu_context *smu);

	/**
	 * @display_config_changed: Notify the SMU of the current display
	 * configuration.
	 *
	 * Allows SMU to properly track blanking periods for memory clock
	 * adjustment. Used in display component synchronization.
	 */
	int (*display_config_changed)(struct smu_context *smu);

	int (*apply_clocks_adjust_rules)(struct smu_context *smu);

	/**
	 * @notify_smc_display_config: Applies display requirements to the
	 * current power state.
	 *
	 * Optimize deep sleep DCEFclk and mclk for the current display
	 * configuration. Used in display component synchronization.
	 */
	int (*notify_smc_display_config)(struct smu_context *smu);

	/**
	 * @is_dpm_running: Check if DPM is running.
	 *
	 * Return: True if DPM is running, false otherwise.
	 */
	bool (*is_dpm_running)(struct smu_context *smu);

	/**
	 * @get_fan_speed_pwm: Get the current fan speed in PWM.
	 */
	int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);

	/**
	 * @get_fan_speed_rpm: Get the current fan speed in rpm.
	 */
	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);

	/**
	 * @set_watermarks_table: Configure and upload the watermarks tables to
	 * the SMU.
	 */
	int (*set_watermarks_table)(struct smu_context *smu,
				    struct pp_smu_wm_range_sets *clock_ranges);

	/**
	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
	 */
	int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);

	/**
	 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
	 * &clocks_in_khz: Array of DPM levels.
	 * &num_states: Elements in &clocks_in_khz.
	 */
	int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);

	/**
	 * @set_default_od_settings: Set the overdrive tables to defaults.
	 */
	int (*set_default_od_settings)(struct smu_context *smu);

	/**
	 * @set_performance_level: Set a performance level.
	 */
	int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);

	/**
	 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
	 * clock switching.
	 *
	 * Disabling this feature forces memory clock speed to maximum.
	 * Enabling sets the minimum memory clock capable of driving the
	 * current display configuration.
	 */
	int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);

	/**
	 * @get_power_limit: Get the device's power limits.
	 */
	int (*get_power_limit)(struct smu_context *smu,
			       uint32_t *current_power_limit,
			       uint32_t *default_power_limit,
			       uint32_t *max_power_limit,
			       uint32_t *min_power_limit);

	/**
	 * @get_ppt_limit: Get the device's ppt limits.
	 */
	int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
			     enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);

	/**
	 * @set_df_cstate: Set data fabric cstate.
	 */
	int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);

	/**
	 * @update_pcie_parameters: Update and upload the system's PCIe
	 * capabilities to the SMU.
	 * &pcie_gen_cap: Maximum allowed PCIe generation.
	 * &pcie_width_cap: Maximum allowed PCIe width.
	 */
	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);

	/**
	 * @i2c_init: Initialize i2c.
	 *
	 * The i2c bus is used internally by the SMU voltage regulators and
	 * other devices. The i2c's EEPROM also stores bad page tables on boards
	 * with ECC.
	 */
	int (*i2c_init)(struct smu_context *smu);

	/**
	 * @i2c_fini: Tear down i2c.
	 */
	void (*i2c_fini)(struct smu_context *smu);

	/**
	 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
	 */
	void (*get_unique_id)(struct smu_context *smu);

	/**
	 * @get_dpm_clock_table: Get a copy of the DPM clock table.
	 *
	 * Used by display component in bandwidth and watermark calculations.
	 */
	int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);

	/**
	 * @init_microcode: Request the SMU's firmware from the kernel.
	 */
	int (*init_microcode)(struct smu_context *smu);

	/**
	 * @load_microcode: Load firmware onto the SMU.
	 */
	int (*load_microcode)(struct smu_context *smu);

	/**
	 * @fini_microcode: Release the SMU's firmware.
	 */
	void (*fini_microcode)(struct smu_context *smu);

	/**
	 * @init_smc_tables: Initialize the SMU tables.
	 */
	int (*init_smc_tables)(struct smu_context *smu);

	/**
	 * @fini_smc_tables: Release the SMU tables.
	 */
	int (*fini_smc_tables)(struct smu_context *smu);

	/**
	 * @init_power: Initialize the power gate table context.
	 */
	int (*init_power)(struct smu_context *smu);

	/**
	 * @fini_power: Release the power gate table context.
	 */
	int (*fini_power)(struct smu_context *smu);

	/**
	 * @check_fw_status: Check the SMU's firmware status.
	 *
	 * Return: Zero if check passes, negative errno on failure.
	 */
	int (*check_fw_status)(struct smu_context *smu);

	/**
	 * @set_mp1_state: Put the SMU into the proper state for a coming
	 * resume from runpm or gpu reset.
	 */
	int (*set_mp1_state)(struct smu_context *smu,
			     enum pp_mp1_state mp1_state);

	/**
	 * @setup_pptable: Initialize the power play table and populate it with
	 * default values.
	 */
	int (*setup_pptable)(struct smu_context *smu);

	/**
	 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
	 */
	int (*get_vbios_bootup_values)(struct smu_context *smu);

	/**
	 * @check_fw_version: Print driver and SMU interface versions to the
	 * system log.
	 *
	 * Interface mismatch is not a critical failure.
	 */
	int (*check_fw_version)(struct smu_context *smu);

	/**
	 * @powergate_sdma: Power up/down system direct memory access.
	 */
	int (*powergate_sdma)(struct smu_context *smu, bool gate);

	/**
	 * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
	 * gating.
	 */
	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);

	/**
	 * @write_pptable: Write the power play table to the SMU.
	 */
	int (*write_pptable)(struct smu_context *smu);

	/**
	 * @set_driver_table_location: Send the location of the driver table to
	 * the SMU.
	 */
	int (*set_driver_table_location)(struct smu_context *smu);

	/**
	 * @set_tool_table_location: Send the location of the tool table to the
	 * SMU.
	 */
	int (*set_tool_table_location)(struct smu_context *smu);

	/**
	 * @notify_memory_pool_location: Send the location of the memory pool to
	 * the SMU.
	 */
	int (*notify_memory_pool_location)(struct smu_context *smu);

	/**
	 * @system_features_control: Enable/disable all SMU features.
	 */
	int (*system_features_control)(struct smu_context *smu, bool en);

	/**
	 * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
	 * &msg: Type of message.
	 * &param: Message parameter.
	 * &read_arg: SMU response (optional).
	 */
	int (*send_smc_msg_with_param)(struct smu_context *smu,
				       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);

	/**
	 * @send_smc_msg: Send a message to the SMU.
	 * &msg: Type of message.
	 * &read_arg: SMU response (optional).
	 */
	int (*send_smc_msg)(struct smu_context *smu,
			    enum smu_message_type msg,
			    uint32_t *read_arg);

	/**
	 * @init_display_count: Notify the SMU of the number of display
	 * components in current display configuration.
	 */
	int (*init_display_count)(struct smu_context *smu, uint32_t count);

	/**
	 * @set_allowed_mask: Notify the SMU of the features currently allowed
	 * by the driver.
	 */
	int (*set_allowed_mask)(struct smu_context *smu);

	/**
	 * @get_enabled_mask: Get a mask of features that are currently enabled
	 * on the SMU.
	 * &feature_mask: Enabled feature mask.
	 */
	int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask);

	/**
	 * @feature_is_enabled: Test if a feature is enabled.
	 *
	 * Return: One if enabled, zero if disabled.
	 */
	int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);

	/**
	 * @disable_all_features_with_exception: Disable all features with
	 * exception to those in &mask.
	 */
	int (*disable_all_features_with_exception)(struct smu_context *smu,
						   enum smu_feature_mask mask);

	/**
	 * @notify_display_change: General interface call to let SMU know about DC change
	 */
	int (*notify_display_change)(struct smu_context *smu);

	/**
	 * @set_power_limit: Set power limit in watts.
	 */
	int (*set_power_limit)(struct smu_context *smu,
			       enum smu_ppt_limit_type limit_type,
			       uint32_t limit);

	/**
	 * @init_max_sustainable_clocks: Populate max sustainable clock speed
	 * table with values from the SMU.
	 */
	int (*init_max_sustainable_clocks)(struct smu_context *smu);

	/**
	 * @enable_thermal_alert: Enable thermal alert interrupts.
	 */
	int (*enable_thermal_alert)(struct smu_context *smu);

	/**
	 * @disable_thermal_alert: Disable thermal alert interrupts.
	 */
	int (*disable_thermal_alert)(struct smu_context *smu);

	/**
	 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
	 * clock speed in MHz.
	 */
	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);

	/**
	 * @display_clock_voltage_request: Set a hard minimum frequency
	 * for a clock domain.
	 */
	int (*display_clock_voltage_request)(struct smu_context *smu, struct
					     pp_display_clock_request
					     *clock_req);

	/**
	 * @get_fan_control_mode: Get the current fan control mode.
	 */
	uint32_t (*get_fan_control_mode)(struct smu_context *smu);

	/**
	 * @set_fan_control_mode: Set the fan control mode.
	 */
	int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);

	/**
	 * @set_fan_speed_pwm: Set a static fan speed in PWM.
	 */
	int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_fan_speed_rpm: Set a static fan speed in rpm.
	 */
	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
	 * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise.
	 */
	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);

	/**
	 * @gfx_off_control: Enable/disable graphics engine poweroff.
	 */
	int (*gfx_off_control)(struct smu_context *smu, bool enable);

	/**
	 * @get_gfx_off_status: Get graphics engine poweroff status.
	 *
	 * Return:
	 * 0 - GFXOFF (default).
	 * 1 - Transition out of GFXOFF state.
	 * 2 - Not in GFXOFF.
	 * 3 - Transition into GFXOFF.
	 */
	uint32_t (*get_gfx_off_status)(struct smu_context *smu);

	/**
	 * @get_gfx_off_entrycount: Total GFXOFF entry count at the time of
	 * query since system power-up.
	 */
	u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);

	/**
	 * @set_gfx_off_residency: Set to 1 to start logging, 0 to stop logging.
	 */
	u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);

	/**
	 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval.
	 */
	u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);

	/**
	 * @register_irq_handler: Register interrupt request handlers.
	 */
	int (*register_irq_handler)(struct smu_context *smu);

	/**
	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
	 */
	int (*set_azalia_d3_pme)(struct smu_context *smu);

	/**
	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
	 * clock speeds table.
	 *
	 * Provides a way for the display component (DC) to get the max
	 * sustainable clocks from the SMU.
	 */
	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);

	/**
	 * @get_bamaco_support: Check if GPU supports BACO/MACO
	 * BACO: Bus Active, Chip Off
	 * MACO: Memory Active, Chip Off
	 */
	int (*get_bamaco_support)(struct smu_context *smu);

	/**
	 * @baco_get_state: Get the current BACO state.
	 *
	 * Return: Current BACO state.
	 */
	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);

	/**
	 * @baco_set_state: Enter/exit BACO.
	 */
	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);

	/**
	 * @baco_enter: Enter BACO.
	 */
	int (*baco_enter)(struct smu_context *smu);

	/**
	 * @baco_exit: Exit BACO.
	 */
	int (*baco_exit)(struct smu_context *smu);

	/**
	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
	 */
	bool (*mode1_reset_is_support)(struct smu_context *smu);

	/**
	 * @mode1_reset: Perform mode1 reset.
	 *
	 * Complete GPU reset.
	 */
	int (*mode1_reset)(struct smu_context *smu);

	/**
	 * @mode2_reset: Perform mode2 reset.
	 *
	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
	 * IPs reset varies by asic.
	 */
	int (*mode2_reset)(struct smu_context *smu);
	/* for gfx feature enablement after mode2 reset */
	int (*enable_gfx_features)(struct smu_context *smu);

	/**
	 * @link_reset: Perform link reset.
	 *
	 * A GFX device driver level reset.
	 */
	int (*link_reset)(struct smu_context *smu);

	/**
	 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
	 * domain in MHz.
	 */
	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);

	/**
	 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
	 * domain in MHz.
	 */
	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
					   bool automatic);

	/**
	 * @set_power_source: Notify the SMU of the current power source.
	 */
	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);

	/**
	 * @log_thermal_throttling_event: Print a thermal throttling warning to
	 * the system's log.
	 */
	void (*log_thermal_throttling_event)(struct smu_context *smu);

	/**
	 * @get_pp_feature_mask: Print a human readable table of enabled
	 * features to buffer.
	 */
	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);

	/**
	 * @set_pp_feature_mask: Request the SMU enable/disable features to
	 * match those enabled in &new_mask.
	 */
	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);

	/**
	 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
	 *
	 * Return: Size of &table
	 */
	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);

	/**
	 * @get_pm_metrics: Get one snapshot of power management metrics from
	 * PMFW.
	 *
	 * Return: Size of the metrics sample
	 */
	ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
				  size_t size);

	/**
	 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
	 */
	int (*enable_mgpu_fan_boost)(struct smu_context *smu);

	/**
	 * @gfx_ulv_control: Enable/disable ultra low voltage.
	 */
	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);

	/**
	 * @deep_sleep_control: Enable/disable deep sleep.
	 */
	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);

	/**
	 * @get_fan_parameters: Get fan parameters.
	 *
	 * Get maximum fan speed from the power play table.
	 */
	int (*get_fan_parameters)(struct smu_context *smu);

	/**
	 * @post_init: Helper function for asic specific workarounds.
	 */
	int (*post_init)(struct smu_context *smu);

	/**
	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
	 */
	void (*interrupt_work)(struct smu_context *smu);

	/**
	 * @gpo_control: Enable/disable graphics power optimization if supported.
	 */
	int (*gpo_control)(struct smu_context *smu, bool enablement);

	/**
	 * @gfx_state_change_set: Send the current graphics state to the SMU.
	 */
	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);

	/**
	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
	 * parameters to defaults.
	 */
	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);

	/**
	 * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
	 */
	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);

	/**
	 * @wait_for_event: Wait for events from SMU.
	 */
	int (*wait_for_event)(struct smu_context *smu,
			      enum smu_event_type event, uint64_t event_arg);

	/**
	 * @send_hbm_bad_pages_num: Message the SMU to update the bad page
	 * number in the SMUBUS table.
	 */
	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);

	/**
	 * @send_rma_reason: Send an RMA reason event to the SMU.
	 */
	int (*send_rma_reason)(struct smu_context *smu);

	/**
	 * @reset_sdma: Message the SMU to soft-reset the SDMA instance(s).
	 */
	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);

	/**
	 * @dpm_reset_vcn: Message the SMU to soft-reset the VCN instance(s).
	 */
	int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);

	/**
	 * @get_ecc_info: Message the SMU to get the ECC INFO table.
	 */
	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);

	/**
	 * @stb_collect_info: Collect Smart Trace Buffer (STB) data.
	 */
	int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);

	/**
	 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
	 */
	int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @set_config_table: Apply the input DriverSmuConfig table settings.
	 */
	int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @send_hbm_bad_channel_flag: Message the SMU to update the bad channel
	 * info in the SMUBUS table.
	 */
	int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);

	/**
	 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP.
	 */
	int (*init_pptable_microcode)(struct smu_context *smu);

	/**
	 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
	 * management.
	 */
	int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);

	/**
	 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
	 * management.
	 */
	int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);

	/**
	 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
	 * management.
	 */
	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);

	/**
	 * @set_mall_enable: Init MALL power gating control.
	 */
	int (*set_mall_enable)(struct smu_context *smu);

	/**
	 * @notify_rlc_state: Notify RLC power state to SMU.
	 */
	int (*notify_rlc_state)(struct smu_context *smu, bool en);

	/**
	 * @is_asic_wbrf_supported: Check whether the PMFW supports the wbrf feature.
	 */
	bool (*is_asic_wbrf_supported)(struct smu_context *smu);

	/**
	 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf-capable ASICs.
	 */
	int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);

	/**
	 * @set_wbrf_exclusion_ranges: Notify the SMU of the occupied WiFi bands.
	 */
	int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
					 struct freq_band_range *exclusion_ranges);
	/**
	 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
	 * Return: Size of table
	 */
	ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
				   void *table);
};
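
/*
 * Example (illustrative sketch, not part of this header): every callback in
 * struct pptable_funcs is optional, so the common calling pattern in the SMU
 * core is to check the pointer before invoking it. The wrapper name below is
 * hypothetical.
 *
 *	static int smu_example_run_btc(struct smu_context *smu)
 *	{
 *		if (smu->ppt_funcs && smu->ppt_funcs->run_btc)
 *			return smu->ppt_funcs->run_btc(smu);
 *
 *		return -EOPNOTSUPP;
 *	}
 */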

typedef enum {
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	METRICS_THROTTLER_STATUS,
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;

enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,
	CMN2ASIC_MAPPING_CLK,
	CMN2ASIC_MAPPING_FEATURE,
	CMN2ASIC_MAPPING_TABLE,
	CMN2ASIC_MAPPING_PWR,
	CMN2ASIC_MAPPING_WORKLOAD,
};

enum smu_baco_seq {
	BACO_SEQ_BACO = 0,
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,
};

#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}
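
/*
 * Example (illustrative sketch, not part of this header): an ASIC backend
 * uses the mapping macros above to build its common-to-ASIC translation
 * tables. The message/clock indices below are placeholders for the values
 * defined by that ASIC's PMFW interface headers.
 *
 *	static const struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
 *		MSG_MAP(TestMessage,   0x1, 0),
 *		MSG_MAP(GetSmuVersion, 0x2, 0),
 *	};
 *
 *	static const struct cmn2asic_mapping example_clk_map[SMU_CLK_COUNT] = {
 *		CLK_MAP(GFXCLK, 0x0),
 *		CLK_MAP(UCLK,   0x2),
 *	};
 */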

/**
 * smu_memcpy_trailing - Copy the end of one structure into the middle of another
 *
 * @dst: Pointer to destination struct
 * @first_dst_member: The member name in @dst where the overwrite begins
 * @last_dst_member: The member name in @dst where the overwrite ends after
 * (i.e. the last member that gets overwritten)
 * @src: Pointer to the source struct
 * @first_src_member: The member name in @src where the copy begins
 *
 */
#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
			    src, first_src_member) \
({ \
	size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
	size_t __src_size = sizeof(*(src)) - __src_offset; \
	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
			    __dst_offset; \
	BUILD_BUG_ON(__src_size != __dst_size); \
	__builtin_memcpy((u8 *)(dst) + __dst_offset, \
			 (u8 *)(src) + __src_offset, \
			 __dst_size); \
})
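
/*
 * Example (illustrative sketch, not part of this header): smu_memcpy_trailing()
 * is handy when a newer PMFW metrics struct only appends fields, so the tail
 * of the source struct maps onto a contiguous run of members in the
 * destination. The struct and member names below are hypothetical; the
 * BUILD_BUG_ON() in the macro catches any size mismatch at compile time.
 *
 *	struct metrics_v1 { uint32_t a; uint32_t b; uint32_t c; };
 *	struct metrics_v2 { uint32_t hdr; uint32_t b; uint32_t c; };
 *
 *	struct metrics_v1 *src = get_v1_metrics();
 *	struct metrics_v2 dst = {0};
 *
 *	smu_memcpy_trailing(&dst, b, c, src, b);
 */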

typedef struct {
	uint16_t LowFreq;
	uint16_t HighFreq;
} WifiOneBand_t;

typedef struct {
	uint32_t WifiBandEntryNum;
	WifiOneBand_t WifiBandEntry[11];
	uint32_t MmHubPadding[8];
} WifiBandEntryTable_t;

#define STR_SOC_PSTATE_POLICY "soc_pstate"
#define STR_XGMI_PLPD_POLICY "xgmi_plpd"

struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type);

static inline enum smu_table_id
smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
{
	switch (type) {
	case SMU_TEMP_METRIC_BASEBOARD:
		return SMU_TABLE_BASEBOARD_TEMP_METRICS;
	case SMU_TEMP_METRIC_GPUBOARD:
		return SMU_TABLE_GPUBOARD_TEMP_METRICS;
	default:
		return SMU_TABLE_COUNT;
	}

	return SMU_TABLE_COUNT;
}

static inline void smu_table_cache_update_time(struct smu_table *table,
					       unsigned long time)
{
	table->cache.last_cache_time = time;
}

static inline bool smu_table_cache_is_valid(struct smu_table *table)
{
	if (!table->cache.buffer || !table->cache.last_cache_time ||
	    !table->cache.interval || !table->cache.size ||
	    time_after(jiffies,
		       table->cache.last_cache_time +
		       msecs_to_jiffies(table->cache.interval)))
		return false;

	return true;
}

static inline int smu_table_cache_init(struct smu_context *smu,
				       enum smu_table_id table_id, size_t size,
				       uint32_t cache_interval)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
	if (!tables[table_id].cache.buffer)
		return -ENOMEM;

	tables[table_id].cache.last_cache_time = 0;
	tables[table_id].cache.interval = cache_interval;
	tables[table_id].cache.size = size;

	return 0;
}

static inline void smu_table_cache_fini(struct smu_context *smu,
					enum smu_table_id table_id)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	if (tables[table_id].cache.buffer) {
		kfree(tables[table_id].cache.buffer);
		tables[table_id].cache.buffer = NULL;
		tables[table_id].cache.last_cache_time = 0;
		tables[table_id].cache.interval = 0;
	}
}
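
/*
 * Example (illustrative sketch, not part of this header): the helpers above
 * implement a simple time-based cache for SMU tables. A backend would
 * typically wire them up roughly like this; the table id, the 100 ms
 * interval and the metrics pointer are placeholders.
 *
 *	ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS,
 *				   sizeof(*metrics), 100);
 *
 *	table = &smu->smu_table.tables[SMU_TABLE_SMU_METRICS];
 *	if (!smu_table_cache_is_valid(table)) {
 *		fetch fresh data from the PMFW into table->cache.buffer, then:
 *		smu_table_cache_update_time(table, jiffies);
 *	}
 *	memcpy(metrics, table->cache.buffer, table->cache.size);
 *
 *	smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
 */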

#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type);

bool smu_mode1_reset_is_support(struct smu_context *smu);
bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
int smu_link_reset(struct smu_context *smu);

extern const struct amd_ip_funcs smu_ip_funcs;

bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max);

int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
			    uint32_t min, uint32_t max);

int smu_set_gfx_power_up_by_imu(struct smu_context *smu);

int smu_set_ac_dc(struct smu_context *smu);

int smu_set_xgmi_plpd_mode(struct smu_context *smu,
			   enum pp_xgmi_plpd_mode mode);

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);

int smu_set_residency_gfxoff(struct smu_context *smu, bool value);

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);

#endif

void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
#endif