xref: /linux/drivers/gpu/drm/amd/pm/amdgpu_pm.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612) !
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Rafał Miłecki <zajec5@gmail.com>
23  *          Alex Deucher <alexdeucher@gmail.com>
24  */
25 
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "atom.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/string_choices.h>
37 #include <asm/processor.h>
38 
39 #define MAX_NUM_OF_FEATURES_PER_SUBSET		8
40 #define MAX_NUM_OF_SUBSETS			8
41 
42 #define DEVICE_ATTR_IS(_name)		(attr_id == device_attr_id__##_name)
43 
/* One overdrive sysfs attribute; @entry links it into its od_kobj's attribute list. */
struct od_attribute {
	struct kobj_attribute	attribute;
	struct list_head	entry;
};
48 
/*
 * Kobject backing one overdrive sysfs directory.
 * @entry links this node into its parent's child list, @attribute heads
 * the list of od_attribute entries it owns, @priv carries driver-private
 * context for the callbacks.
 */
struct od_kobj {
	struct kobject		kobj;
	struct list_head	entry;
	struct list_head	attribute;
	void			*priv;
};
55 
/* Per-feature callbacks: visibility check plus sysfs show/store handlers. */
struct od_feature_ops {
	umode_t (*is_visible)(struct amdgpu_device *adev);
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};
63 
/* A single named overdrive feature and its callbacks. */
struct od_feature_item {
	const char		*name;
	struct od_feature_ops	ops;
};
68 
/* A named group of up to MAX_NUM_OF_FEATURES_PER_SUBSET overdrive features. */
struct od_feature_container {
	char				*name;
	struct od_feature_ops		ops;
	struct od_feature_item		sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};
74 
/* The full overdrive feature tree: up to MAX_NUM_OF_SUBSETS containers. */
struct od_feature_set {
	struct od_feature_container	containers[MAX_NUM_OF_SUBSETS];
};
78 
/* Maps hwmon temperature channels to the label strings exported via sysfs. */
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
87 
/*
 * Human-readable names for the power profile modes.
 * NOTE(review): presumably indexed by the PP_SMC_POWER_PROFILE_* enum —
 * confirm ordering against the enum definition before reordering entries.
 */
const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};
100 
101 /**
102  * amdgpu_pm_dev_state_check - Check if device can be accessed.
103  * @adev: Target device.
104  * @runpm: Check runpm status for suspend state checks.
105  *
106  * Checks the state of the @adev for access. Return 0 if the device is
107  * accessible or a negative error code otherwise.
108  */
109 static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
110 {
111 	bool runpm_check = runpm ? adev->in_runpm : false;
112 	bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
113 
114 	if (amdgpu_in_reset(adev) || !full_init)
115 		return -EBUSY;
116 
117 	if (adev->in_suspend && !runpm_check)
118 		return -EBUSY;
119 
120 	return 0;
121 }
122 
123 /**
124  * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
125  * @adev: Target device.
126  *
127  * Checks the state of the @adev for access. Use runtime pm API to resume if
128  * needed. Return 0 if the device is accessible or a negative error code
129  * otherwise.
130  */
131 static int amdgpu_pm_get_access(struct amdgpu_device *adev)
132 {
133 	int ret;
134 
135 	ret = amdgpu_pm_dev_state_check(adev, true);
136 	if (ret)
137 		return ret;
138 
139 	return pm_runtime_resume_and_get(adev->dev);
140 }
141 
142 /**
143  * amdgpu_pm_get_access_if_active - Check if device is active for access.
144  * @adev: Target device.
145  *
146  * Checks the state of the @adev for access. Use runtime pm API to determine
147  * if device is active. Allow access only if device is active.Return 0 if the
148  * device is accessible or a negative error code otherwise.
149  */
150 static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
151 {
152 	int ret;
153 
154 	/* Ignore runpm status. If device is in suspended state, deny access */
155 	ret = amdgpu_pm_dev_state_check(adev, false);
156 	if (ret)
157 		return ret;
158 
159 	/*
160 	 * Allow only if device is active. If runpm is disabled also, as in
161 	 * kernels without CONFIG_PM, allow access.
162 	 */
163 	ret = pm_runtime_get_if_active(adev->dev);
164 	if (!ret)
165 		return -EPERM;
166 
167 	return 0;
168 }
169 
170 /**
171  * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
172  * @adev: Target device.
173  *
174  * Should be paired with amdgpu_pm_get_access* calls
175  */
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
	/* Drop the usage count; autosuspend kicks in once the device idles. */
	pm_runtime_put_autosuspend(adev->dev);
}
180 
181 /**
182  * DOC: power_dpm_state
183  *
184  * The power_dpm_state file is a legacy interface and is only provided for
185  * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
186  * certain power related parameters.  The file power_dpm_state is used for this.
187  * It accepts the following arguments:
188  *
189  * - battery
190  *
191  * - balanced
192  *
193  * - performance
194  *
195  * battery
196  *
197  * On older GPUs, the vbios provided a special power state for battery
198  * operation.  Selecting battery switched to this state.  This is no
199  * longer provided on newer GPUs so the option does nothing in that case.
200  *
201  * balanced
202  *
203  * On older GPUs, the vbios provided a special power state for balanced
204  * operation.  Selecting balanced switched to this state.  This is no
205  * longer provided on newer GPUs so the option does nothing in that case.
206  *
207  * performance
208  *
209  * On older GPUs, the vbios provided a special power state for performance
210  * operation.  Selecting performance switched to this state.  This is no
211  * longer provided on newer GPUs so the option does nothing in that case.
212  *
213  */
214 
215 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
216 					  struct device_attribute *attr,
217 					  char *buf)
218 {
219 	struct drm_device *ddev = dev_get_drvdata(dev);
220 	struct amdgpu_device *adev = drm_to_adev(ddev);
221 	enum amd_pm_state_type pm;
222 	int ret;
223 
224 	ret = amdgpu_pm_get_access_if_active(adev);
225 	if (ret)
226 		return ret;
227 
228 	amdgpu_dpm_get_current_power_state(adev, &pm);
229 
230 	amdgpu_pm_put_access(adev);
231 
232 	return sysfs_emit(buf, "%s\n",
233 			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
234 			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
235 }
236 
237 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
238 					  struct device_attribute *attr,
239 					  const char *buf,
240 					  size_t count)
241 {
242 	struct drm_device *ddev = dev_get_drvdata(dev);
243 	struct amdgpu_device *adev = drm_to_adev(ddev);
244 	enum amd_pm_state_type  state;
245 	int ret;
246 
247 	if (sysfs_streq(buf, "battery"))
248 		state = POWER_STATE_TYPE_BATTERY;
249 	else if (sysfs_streq(buf, "balanced"))
250 		state = POWER_STATE_TYPE_BALANCED;
251 	else if (sysfs_streq(buf, "performance"))
252 		state = POWER_STATE_TYPE_PERFORMANCE;
253 	else
254 		return -EINVAL;
255 
256 	ret = amdgpu_pm_get_access(adev);
257 	if (ret < 0)
258 		return ret;
259 
260 	amdgpu_dpm_set_power_state(adev, state);
261 
262 	amdgpu_pm_put_access(adev);
263 
264 	return count;
265 }
266 
267 
268 /**
269  * DOC: power_dpm_force_performance_level
270  *
271  * The amdgpu driver provides a sysfs API for adjusting certain power
272  * related parameters.  The file power_dpm_force_performance_level is
273  * used for this.  It accepts the following arguments:
274  *
275  * - auto
276  *
277  * - low
278  *
279  * - high
280  *
281  * - manual
282  *
283  * - profile_standard
284  *
285  * - profile_min_sclk
286  *
287  * - profile_min_mclk
288  *
289  * - profile_peak
290  *
291  * auto
292  *
293  * When auto is selected, the driver will attempt to dynamically select
294  * the optimal power profile for current conditions in the driver.
295  *
296  * low
297  *
298  * When low is selected, the clocks are forced to the lowest power state.
299  *
300  * high
301  *
302  * When high is selected, the clocks are forced to the highest power state.
303  *
304  * manual
305  *
306  * When manual is selected, the user can manually adjust which power states
307  * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
308  * and pp_dpm_pcie files and adjust the power state transition heuristics
309  * via the pp_power_profile_mode sysfs file.
310  *
311  * profile_standard
312  * profile_min_sclk
313  * profile_min_mclk
314  * profile_peak
315  *
316  * When the profiling modes are selected, clock and power gating are
317  * disabled and the clocks are set for different profiling cases. This
318  * mode is recommended for profiling specific work loads where you do
319  * not want clock or power gating for clock fluctuation to interfere
320  * with your results. profile_standard sets the clocks to a fixed clock
321  * level which varies from asic to asic.  profile_min_sclk forces the sclk
322  * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
323  * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
324  *
325  */
326 
327 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
328 							    struct device_attribute *attr,
329 							    char *buf)
330 {
331 	struct drm_device *ddev = dev_get_drvdata(dev);
332 	struct amdgpu_device *adev = drm_to_adev(ddev);
333 	enum amd_dpm_forced_level level = 0xff;
334 	int ret;
335 
336 	ret = amdgpu_pm_get_access_if_active(adev);
337 	if (ret)
338 		return ret;
339 
340 	level = amdgpu_dpm_get_performance_level(adev);
341 
342 	amdgpu_pm_put_access(adev);
343 
344 	return sysfs_emit(buf, "%s\n",
345 			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
346 			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
347 			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
348 			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
349 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
350 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
351 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
352 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
353 			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
354 			  "unknown");
355 }
356 
357 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
358 							    struct device_attribute *attr,
359 							    const char *buf,
360 							    size_t count)
361 {
362 	struct drm_device *ddev = dev_get_drvdata(dev);
363 	struct amdgpu_device *adev = drm_to_adev(ddev);
364 	enum amd_dpm_forced_level level;
365 	int ret = 0;
366 
367 	if (sysfs_streq(buf, "low"))
368 		level = AMD_DPM_FORCED_LEVEL_LOW;
369 	else if (sysfs_streq(buf, "high"))
370 		level = AMD_DPM_FORCED_LEVEL_HIGH;
371 	else if (sysfs_streq(buf, "auto"))
372 		level = AMD_DPM_FORCED_LEVEL_AUTO;
373 	else if (sysfs_streq(buf, "manual"))
374 		level = AMD_DPM_FORCED_LEVEL_MANUAL;
375 	else if (sysfs_streq(buf, "profile_exit"))
376 		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
377 	else if (sysfs_streq(buf, "profile_standard"))
378 		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
379 	else if (sysfs_streq(buf, "profile_min_sclk"))
380 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
381 	else if (sysfs_streq(buf, "profile_min_mclk"))
382 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
383 	else if (sysfs_streq(buf, "profile_peak"))
384 		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
385 	else if (sysfs_streq(buf, "perf_determinism"))
386 		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
387 	else
388 		return -EINVAL;
389 
390 	ret = amdgpu_pm_get_access(adev);
391 	if (ret < 0)
392 		return ret;
393 
394 	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
395 	if (amdgpu_dpm_force_performance_level(adev, level)) {
396 		amdgpu_pm_put_access(adev);
397 		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
398 		return -EINVAL;
399 	}
400 	/* override whatever a user ctx may have set */
401 	adev->pm.stable_pstate_ctx = NULL;
402 	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
403 
404 	amdgpu_pm_put_access(adev);
405 
406 	return count;
407 }
408 
409 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
410 		struct device_attribute *attr,
411 		char *buf)
412 {
413 	struct drm_device *ddev = dev_get_drvdata(dev);
414 	struct amdgpu_device *adev = drm_to_adev(ddev);
415 	struct pp_states_info data;
416 	uint32_t i;
417 	int buf_len, ret;
418 
419 	ret = amdgpu_pm_get_access_if_active(adev);
420 	if (ret)
421 		return ret;
422 
423 	if (amdgpu_dpm_get_pp_num_states(adev, &data))
424 		memset(&data, 0, sizeof(data));
425 
426 	amdgpu_pm_put_access(adev);
427 
428 	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
429 	for (i = 0; i < data.nums; i++)
430 		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
431 				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
432 				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
433 				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
434 				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
435 
436 	return buf_len;
437 }
438 
439 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
440 		struct device_attribute *attr,
441 		char *buf)
442 {
443 	struct drm_device *ddev = dev_get_drvdata(dev);
444 	struct amdgpu_device *adev = drm_to_adev(ddev);
445 	struct pp_states_info data = {0};
446 	enum amd_pm_state_type pm = 0;
447 	int i = 0, ret = 0;
448 
449 	ret = amdgpu_pm_get_access_if_active(adev);
450 	if (ret)
451 		return ret;
452 
453 	amdgpu_dpm_get_current_power_state(adev, &pm);
454 
455 	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
456 
457 	amdgpu_pm_put_access(adev);
458 
459 	if (ret)
460 		return ret;
461 
462 	for (i = 0; i < data.nums; i++) {
463 		if (pm == data.states[i])
464 			break;
465 	}
466 
467 	if (i == data.nums)
468 		i = -EINVAL;
469 
470 	return sysfs_emit(buf, "%d\n", i);
471 }
472 
473 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
474 		struct device_attribute *attr,
475 		char *buf)
476 {
477 	struct drm_device *ddev = dev_get_drvdata(dev);
478 	struct amdgpu_device *adev = drm_to_adev(ddev);
479 
480 	if (adev->pm.pp_force_state_enabled)
481 		return amdgpu_get_pp_cur_state(dev, attr, buf);
482 	else
483 		return sysfs_emit(buf, "\n");
484 }
485 
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	/* Any write first disarms a previously forced state. */
	adev->pm.pp_force_state_enabled = false;

	/* An empty write (just the terminating newline) only disarms. */
	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	/* idx is user-controlled: clamp under speculation (Spectre v1). */
	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return ret;
}
539 
540 /**
541  * DOC: pp_table
542  *
543  * The amdgpu driver provides a sysfs API for uploading new powerplay
544  * tables.  The file pp_table is used for this.  Reading the file
545  * will dump the current power play table.  Writing to the file
546  * will attempt to upload a new powerplay table and re-initialize
547  * powerplay using that new table.
548  *
549  */
550 
551 static ssize_t amdgpu_get_pp_table(struct device *dev,
552 		struct device_attribute *attr,
553 		char *buf)
554 {
555 	struct drm_device *ddev = dev_get_drvdata(dev);
556 	struct amdgpu_device *adev = drm_to_adev(ddev);
557 	char *table = NULL;
558 	int size, ret;
559 
560 	ret = amdgpu_pm_get_access_if_active(adev);
561 	if (ret)
562 		return ret;
563 
564 	size = amdgpu_dpm_get_pp_table(adev, &table);
565 
566 	amdgpu_pm_put_access(adev);
567 
568 	if (size <= 0)
569 		return size;
570 
571 	if (size >= PAGE_SIZE)
572 		size = PAGE_SIZE - 1;
573 
574 	memcpy(buf, table, size);
575 
576 	return size;
577 }
578 
579 static ssize_t amdgpu_set_pp_table(struct device *dev,
580 		struct device_attribute *attr,
581 		const char *buf,
582 		size_t count)
583 {
584 	struct drm_device *ddev = dev_get_drvdata(dev);
585 	struct amdgpu_device *adev = drm_to_adev(ddev);
586 	int ret = 0;
587 
588 	ret = amdgpu_pm_get_access(adev);
589 	if (ret < 0)
590 		return ret;
591 
592 	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
593 
594 	amdgpu_pm_put_access(adev);
595 
596 	if (ret)
597 		return ret;
598 
599 	return count;
600 }
601 
602 /**
603  * DOC: pp_od_clk_voltage
604  *
605  * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
606  * in each power level within a power state.  The pp_od_clk_voltage is used for
607  * this.
608  *
609  * Note that the actual memory controller clock rate are exposed, not
610  * the effective memory clock of the DRAMs. To translate it, use the
611  * following formula:
612  *
613  * Clock conversion (Mhz):
614  *
615  * HBM: effective_memory_clock = memory_controller_clock * 1
616  *
617  * G5: effective_memory_clock = memory_controller_clock * 1
618  *
619  * G6: effective_memory_clock = memory_controller_clock * 2
620  *
621  * DRAM data rate (MT/s):
622  *
623  * HBM: effective_memory_clock * 2 = data_rate
624  *
625  * G5: effective_memory_clock * 4 = data_rate
626  *
627  * G6: effective_memory_clock * 8 = data_rate
628  *
629  * Bandwidth (MB/s):
630  *
631  * data_rate * vram_bit_width / 8 = memory_bandwidth
632  *
633  * Some examples:
634  *
635  * G5 on RX460:
636  *
637  * memory_controller_clock = 1750 Mhz
638  *
639  * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
640  *
641  * data rate = 1750 * 4 = 7000 MT/s
642  *
643  * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
644  *
645  * G6 on RX5700:
646  *
647  * memory_controller_clock = 875 Mhz
648  *
649  * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
650  *
651  * data rate = 1750 * 8 = 14000 MT/s
652  *
653  * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
654  *
655  * < For Vega10 and previous ASICs >
656  *
657  * Reading the file will display:
658  *
659  * - a list of engine clock levels and voltages labeled OD_SCLK
660  *
661  * - a list of memory clock levels and voltages labeled OD_MCLK
662  *
663  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
664  *
665  * To manually adjust these settings, first select manual using
666  * power_dpm_force_performance_level. Enter a new value for each
667  * level by writing a string that contains "s/m level clock voltage" to
668  * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
669  * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
670  * 810 mV.  When you have edited all of the states as needed, write
671  * "c" (commit) to the file to commit your changes.  If you want to reset to the
672  * default power levels, write "r" (reset) to the file to reset them.
673  *
674  *
675  * < For Vega20 and newer ASICs >
676  *
677  * Reading the file will display:
678  *
679  * - minimum and maximum engine clock labeled OD_SCLK
680  *
681  * - minimum(not available for Vega20 and Navi1x) and maximum memory
682  *   clock labeled OD_MCLK
683  *
684  * - minimum and maximum fabric clock labeled OD_FCLK (SMU13)
685  *
686  * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
687  *   They can be used to calibrate the sclk voltage curve. This is
688  *   available for Vega20 and NV1X.
689  *
690  * - voltage offset(in mV) applied on target voltage calculation.
691  *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
692  *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
693  *   voltage calculation can be illustrated by "voltage = voltage
694  *   calculated from v/f curve + overdrive vddgfx offset"
695  *
696  * - a list of valid ranges for sclk, mclk, voltage curve points
697  *   or voltage offset labeled OD_RANGE
698  *
699  * < For APUs >
700  *
701  * Reading the file will display:
702  *
703  * - minimum and maximum engine clock labeled OD_SCLK
704  *
705  * - a list of valid ranges for sclk labeled OD_RANGE
706  *
707  * < For VanGogh >
708  *
709  * Reading the file will display:
710  *
711  * - minimum and maximum engine clock labeled OD_SCLK
712  * - minimum and maximum core clocks labeled OD_CCLK
713  *
714  * - a list of valid ranges for sclk and cclk labeled OD_RANGE
715  *
716  * To manually adjust these settings:
717  *
718  * - First select manual using power_dpm_force_performance_level
719  *
720  * - For clock frequency setting, enter a new value by writing a
721  *   string that contains "s/m/f index clock" to the file. The index
722  *   should be 0 if to set minimum clock. And 1 if to set maximum
723  *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
724  *   "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will
725  *   update maximum fabric clock to be 1600Mhz. For core
726  *   clocks on VanGogh, the string contains "p core index clock".
727  *   E.g., "p 2 0 800" would set the minimum core clock on core
728  *   2 to 800Mhz.
729  *
730  *   For sclk voltage curve supported by Vega20 and NV1X, enter the new
731  *   values by writing a string that contains "vc point clock voltage"
732  *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
733  *   600" will update point1 with clock set as 300Mhz and voltage as 600mV.
734  *   "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and
735  *   voltage 1000mV.
736  *
737  *   For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
738  *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
739  *   string that contains "vo offset". E.g., "vo -10" will update the extra
740  *   voltage offset applied to the whole v/f curve line as -10mv.
741  *
742  * - When you have edited all of the states as needed, write "c" (commit)
743  *   to the file to commit your changes
744  *
745  * - If you want to reset to the default power levels, write "r" (reset)
746  *   to the file to reset them
747  *
748  */
749 
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	/* Reject empty writes and anything that cannot fit in buf_cpy. */
	if (count > 127 || count == 0)
		return -EINVAL;

	/* The leading token selects which OD table/command is being edited. */
	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'f')
		type = PP_OD_EDIT_FCLK_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	/* Parse from a NUL-terminated copy; strsep() below mutates it. */
	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = 0;

	tmp_str = buf_cpy;

	/* Two-letter commands ("vc"/"vo") consume one extra leading char. */
	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	/* Step past the command character, then past any whitespace run. */
	while (isspace(*++tmp_str));

	/* Collect the numeric arguments (indices, clocks, voltages). */
	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		/* strsep() sets tmp_str to NULL once the string is consumed. */
		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	/*
	 * Hand the edit to both the fine-grain (SMU) path and the legacy
	 * ODN path. NOTE(review): assumes whichever path a given ASIC does
	 * not implement returns 0 — verify in the dpm wrappers.
	 */
	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
					      type,
					      parameter,
					      parameter_size))
		goto err_out;

	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
					  parameter, parameter_size))
		goto err_out;

	/* "c" commits the staged edits by re-evaluating the power state. */
	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
					     AMD_PP_TASK_READJUST_POWER_STATE,
					     NULL))
			goto err_out;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return -EINVAL;
}
843 
844 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
845 		struct device_attribute *attr,
846 		char *buf)
847 {
848 	struct drm_device *ddev = dev_get_drvdata(dev);
849 	struct amdgpu_device *adev = drm_to_adev(ddev);
850 	int size = 0;
851 	int ret;
852 	enum pp_clock_type od_clocks[] = {
853 		OD_SCLK,
854 		OD_MCLK,
855 		OD_FCLK,
856 		OD_VDDC_CURVE,
857 		OD_RANGE,
858 		OD_VDDGFX_OFFSET,
859 		OD_CCLK,
860 	};
861 	uint clk_index;
862 
863 	ret = amdgpu_pm_get_access_if_active(adev);
864 	if (ret)
865 		return ret;
866 
867 	for (clk_index = 0 ; clk_index < ARRAY_SIZE(od_clocks) ; clk_index++) {
868 		amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
869 	}
870 
871 	if (size == 0)
872 		size = sysfs_emit(buf, "\n");
873 
874 	amdgpu_pm_put_access(adev);
875 
876 	return size;
877 }
878 
879 /**
880  * DOC: pp_features
881  *
882  * The amdgpu driver provides a sysfs API for adjusting what powerplay
883  * features to be enabled. The file pp_features is used for this. And
884  * this is only available for Vega10 and later dGPUs.
885  *
886  * Reading back the file will show you the followings:
887  * - Current ppfeature masks
888  * - List of the all supported powerplay features with their naming,
889  *   bitmasks and enablement status('Y'/'N' means "enabled"/"disabled").
890  *
891  * To manually enable or disable a specific feature, just set or clear
892  * the corresponding bit from original ppfeature masks and input the
893  * new ppfeature masks.
894  */
895 static ssize_t amdgpu_set_pp_features(struct device *dev,
896 				      struct device_attribute *attr,
897 				      const char *buf,
898 				      size_t count)
899 {
900 	struct drm_device *ddev = dev_get_drvdata(dev);
901 	struct amdgpu_device *adev = drm_to_adev(ddev);
902 	uint64_t featuremask;
903 	int ret;
904 
905 	ret = kstrtou64(buf, 0, &featuremask);
906 	if (ret)
907 		return -EINVAL;
908 
909 	ret = amdgpu_pm_get_access(adev);
910 	if (ret < 0)
911 		return ret;
912 
913 	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
914 
915 	amdgpu_pm_put_access(adev);
916 
917 	if (ret)
918 		return -EINVAL;
919 
920 	return count;
921 }
922 
923 static ssize_t amdgpu_get_pp_features(struct device *dev,
924 				      struct device_attribute *attr,
925 				      char *buf)
926 {
927 	struct drm_device *ddev = dev_get_drvdata(dev);
928 	struct amdgpu_device *adev = drm_to_adev(ddev);
929 	ssize_t size;
930 	int ret;
931 
932 	ret = amdgpu_pm_get_access_if_active(adev);
933 	if (ret)
934 		return ret;
935 
936 	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
937 	if (size <= 0)
938 		size = sysfs_emit(buf, "\n");
939 
940 	amdgpu_pm_put_access(adev);
941 
942 	return size;
943 }
944 
945 /**
946  * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
947  *
948  * The amdgpu driver provides a sysfs API for adjusting what power levels
949  * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
950  * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
951  * this.
952  *
953  * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
954  * Vega10 and later ASICs.
955  * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
956  *
957  * Reading back the files will show you the available power levels within
958  * the power state and the clock information for those levels. If deep sleep is
959  * applied to a clock, the level will be denoted by a special level 'S:'
960  * E.g., ::
961  *
962  *  S: 19Mhz *
963  *  0: 615Mhz
964  *  1: 800Mhz
965  *  2: 888Mhz
966  *  3: 1000Mhz
967  *
968  *
969  * To manually adjust these states, first select manual using
970  * power_dpm_force_performance_level.
971  * Secondly, enter a new value for each level by inputing a string that
972  * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
973  * E.g.,
974  *
975  * .. code-block:: bash
976  *
977  *	echo "4 5 6" > pp_dpm_sclk
978  *
979  * will enable sclk levels 4, 5, and 6.
980  *
981  * NOTE: change to the dcefclk max dpm level is not supported now
982  */
983 
984 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
985 		enum pp_clock_type type,
986 		char *buf)
987 {
988 	struct drm_device *ddev = dev_get_drvdata(dev);
989 	struct amdgpu_device *adev = drm_to_adev(ddev);
990 	int size = 0;
991 	int ret = 0;
992 
993 	ret = amdgpu_pm_get_access_if_active(adev);
994 	if (ret)
995 		return ret;
996 
997 	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
998 	if (ret)
999 		return ret;
1000 
1001 	if (size == 0)
1002 		size = sysfs_emit(buf, "\n");
1003 
1004 	amdgpu_pm_put_access(adev);
1005 
1006 	return size;
1007 }
1008 
1009 /*
1010  * Worst case: 32 bits individually specified, in octal at 12 characters
1011  * per line (+1 for \n).
1012  */
1013 #define AMDGPU_MASK_BUF_MAX	(32 * 13)
1014 
1015 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1016 {
1017 	int ret;
1018 	unsigned long level;
1019 	char *sub_str = NULL;
1020 	char *tmp;
1021 	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1022 	const char delimiter[3] = {' ', '\n', '\0'};
1023 	size_t bytes;
1024 
1025 	*mask = 0;
1026 
1027 	bytes = min(count, sizeof(buf_cpy) - 1);
1028 	memcpy(buf_cpy, buf, bytes);
1029 	buf_cpy[bytes] = '\0';
1030 	tmp = buf_cpy;
1031 	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1032 		if (strlen(sub_str)) {
1033 			ret = kstrtoul(sub_str, 0, &level);
1034 			if (ret || level > 31)
1035 				return -EINVAL;
1036 			*mask |= 1 << level;
1037 		} else
1038 			break;
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1045 		enum pp_clock_type type,
1046 		const char *buf,
1047 		size_t count)
1048 {
1049 	struct drm_device *ddev = dev_get_drvdata(dev);
1050 	struct amdgpu_device *adev = drm_to_adev(ddev);
1051 	int ret;
1052 	uint32_t mask = 0;
1053 
1054 	ret = amdgpu_read_mask(buf, count, &mask);
1055 	if (ret)
1056 		return ret;
1057 
1058 	ret = amdgpu_pm_get_access(adev);
1059 	if (ret < 0)
1060 		return ret;
1061 
1062 	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1063 
1064 	amdgpu_pm_put_access(adev);
1065 
1066 	if (ret)
1067 		return -EINVAL;
1068 
1069 	return count;
1070 }
1071 
/* sysfs "pp_dpm_sclk" show: list the gfx (shader) clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

/* sysfs "pp_dpm_sclk" store: apply the user-supplied gfx clock level mask. */
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}
1086 
/* sysfs "pp_dpm_mclk" show: list the memory clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

/* sysfs "pp_dpm_mclk" store: apply the user-supplied memory clock level mask. */
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}
1101 
/* sysfs "pp_dpm_socclk" show: list the SOC clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

/* sysfs "pp_dpm_socclk" store: apply the user-supplied SOC clock level mask. */
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}
1116 
/* sysfs "pp_dpm_fclk" show: list the fabric clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

/* sysfs "pp_dpm_fclk" store: apply the user-supplied fabric clock level mask. */
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}
1131 
/* sysfs "pp_dpm_vclk" show: list the video (VCN) clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

/* sysfs "pp_dpm_vclk" store: apply the user-supplied video clock level mask. */
static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}
1146 
/* sysfs "pp_dpm_vclk1" show: list the second VCN instance's vclk DPM levels. */
static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

/* sysfs "pp_dpm_vclk1" store: apply the user-supplied vclk1 level mask. */
static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}
1161 
/* sysfs "pp_dpm_dclk" show: list the decode clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

/* sysfs "pp_dpm_dclk" store: apply the user-supplied decode clock level mask. */
static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}
1176 
/* sysfs "pp_dpm_dclk1" show: list the second VCN instance's dclk DPM levels. */
static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

/* sysfs "pp_dpm_dclk1" store: apply the user-supplied dclk1 level mask. */
static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}
1191 
/* sysfs "pp_dpm_dcefclk" show: list the display controller clock DPM levels. */
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

/* sysfs "pp_dpm_dcefclk" store: apply the user-supplied dcefclk level mask. */
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}
1206 
/* sysfs "pp_dpm_pcie" show: list the PCIe link speed/width DPM levels. */
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

/* sysfs "pp_dpm_pcie" store: apply the user-supplied PCIe level mask. */
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}
1221 
1222 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1223 		struct device_attribute *attr,
1224 		char *buf)
1225 {
1226 	struct drm_device *ddev = dev_get_drvdata(dev);
1227 	struct amdgpu_device *adev = drm_to_adev(ddev);
1228 	uint32_t value = 0;
1229 	int ret;
1230 
1231 	ret = amdgpu_pm_get_access_if_active(adev);
1232 	if (ret)
1233 		return ret;
1234 
1235 	value = amdgpu_dpm_get_sclk_od(adev);
1236 
1237 	amdgpu_pm_put_access(adev);
1238 
1239 	return sysfs_emit(buf, "%d\n", value);
1240 }
1241 
1242 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1243 		struct device_attribute *attr,
1244 		const char *buf,
1245 		size_t count)
1246 {
1247 	struct drm_device *ddev = dev_get_drvdata(dev);
1248 	struct amdgpu_device *adev = drm_to_adev(ddev);
1249 	int ret;
1250 	long int value;
1251 
1252 	ret = kstrtol(buf, 0, &value);
1253 
1254 	if (ret)
1255 		return -EINVAL;
1256 
1257 	ret = amdgpu_pm_get_access(adev);
1258 	if (ret < 0)
1259 		return ret;
1260 
1261 	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1262 
1263 	amdgpu_pm_put_access(adev);
1264 
1265 	return count;
1266 }
1267 
1268 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1269 		struct device_attribute *attr,
1270 		char *buf)
1271 {
1272 	struct drm_device *ddev = dev_get_drvdata(dev);
1273 	struct amdgpu_device *adev = drm_to_adev(ddev);
1274 	uint32_t value = 0;
1275 	int ret;
1276 
1277 	ret = amdgpu_pm_get_access_if_active(adev);
1278 	if (ret)
1279 		return ret;
1280 
1281 	value = amdgpu_dpm_get_mclk_od(adev);
1282 
1283 	amdgpu_pm_put_access(adev);
1284 
1285 	return sysfs_emit(buf, "%d\n", value);
1286 }
1287 
1288 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1289 		struct device_attribute *attr,
1290 		const char *buf,
1291 		size_t count)
1292 {
1293 	struct drm_device *ddev = dev_get_drvdata(dev);
1294 	struct amdgpu_device *adev = drm_to_adev(ddev);
1295 	int ret;
1296 	long int value;
1297 
1298 	ret = kstrtol(buf, 0, &value);
1299 
1300 	if (ret)
1301 		return -EINVAL;
1302 
1303 	ret = amdgpu_pm_get_access(adev);
1304 	if (ret < 0)
1305 		return ret;
1306 
1307 	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1308 
1309 	amdgpu_pm_put_access(adev);
1310 
1311 	return count;
1312 }
1313 
1314 /**
1315  * DOC: pp_power_profile_mode
1316  *
1317  * The amdgpu driver provides a sysfs API for adjusting the heuristics
1318  * related to switching between power levels in a power state.  The file
1319  * pp_power_profile_mode is used for this.
1320  *
1321  * Reading this file outputs a list of all of the predefined power profiles
1322  * and the relevant heuristics settings for that profile.
1323  *
1324  * To select a profile or create a custom profile, first select manual using
1325  * power_dpm_force_performance_level.  Writing the number of a predefined
1326  * profile to pp_power_profile_mode will enable those heuristics.  To
1327  * create a custom set of heuristics, write a string of numbers to the file
1328  * starting with the number of the custom profile along with a setting
1329  * for each heuristic parameter.  Due to differences across asic families
1330  * the heuristic parameters vary from family to family. Additionally,
1331  * you can apply the custom heuristics to different clock domains.  Each
1332  * clock domain is considered a distinct operation so if you modify the
 * gfxclk heuristics and then the memclk heuristics, all of the
1334  * custom heuristics will be retained until you switch to another profile.
1335  *
1336  */
1337 
1338 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1339 		struct device_attribute *attr,
1340 		char *buf)
1341 {
1342 	struct drm_device *ddev = dev_get_drvdata(dev);
1343 	struct amdgpu_device *adev = drm_to_adev(ddev);
1344 	ssize_t size;
1345 	int ret;
1346 
1347 	ret = amdgpu_pm_get_access_if_active(adev);
1348 	if (ret)
1349 		return ret;
1350 
1351 	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1352 	if (size <= 0)
1353 		size = sysfs_emit(buf, "\n");
1354 
1355 	amdgpu_pm_put_access(adev);
1356 
1357 	return size;
1358 }
1359 
1360 
1361 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1362 		struct device_attribute *attr,
1363 		const char *buf,
1364 		size_t count)
1365 {
1366 	int ret;
1367 	struct drm_device *ddev = dev_get_drvdata(dev);
1368 	struct amdgpu_device *adev = drm_to_adev(ddev);
1369 	uint32_t parameter_size = 0;
1370 	long parameter[64];
1371 	char *sub_str, buf_cpy[128];
1372 	char *tmp_str;
1373 	uint32_t i = 0;
1374 	char tmp[2];
1375 	long int profile_mode = 0;
1376 	const char delimiter[3] = {' ', '\n', '\0'};
1377 
1378 	tmp[0] = *(buf);
1379 	tmp[1] = '\0';
1380 	ret = kstrtol(tmp, 0, &profile_mode);
1381 	if (ret)
1382 		return -EINVAL;
1383 
1384 	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1385 		if (count < 2 || count > 127)
1386 			return -EINVAL;
1387 		while (isspace(*++buf))
1388 			i++;
1389 		memcpy(buf_cpy, buf, count-i);
1390 		tmp_str = buf_cpy;
1391 		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1392 			if (strlen(sub_str) == 0)
1393 				continue;
1394 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1395 			if (ret)
1396 				return -EINVAL;
1397 			parameter_size++;
1398 			if (!tmp_str)
1399 				break;
1400 			while (isspace(*tmp_str))
1401 				tmp_str++;
1402 		}
1403 	}
1404 	parameter[parameter_size] = profile_mode;
1405 
1406 	ret = amdgpu_pm_get_access(adev);
1407 	if (ret < 0)
1408 		return ret;
1409 
1410 	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1411 
1412 	amdgpu_pm_put_access(adev);
1413 
1414 	if (!ret)
1415 		return count;
1416 
1417 	return -EINVAL;
1418 }
1419 
1420 static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
1421 					enum amd_pp_sensors sensor,
1422 					void *query)
1423 {
1424 	int r, size = sizeof(uint32_t);
1425 
1426 	r = amdgpu_pm_get_access_if_active(adev);
1427 	if (r)
1428 		return r;
1429 
1430 	/* get the sensor value */
1431 	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1432 
1433 	amdgpu_pm_put_access(adev);
1434 
1435 	return r;
1436 }
1437 
1438 /**
1439  * DOC: gpu_busy_percent
1440  *
1441  * The amdgpu driver provides a sysfs API for reading how busy the GPU
1442  * is as a percentage.  The file gpu_busy_percent is used for this.
1443  * The SMU firmware computes a percentage of load based on the
1444  * aggregate activity level in the IP cores.
1445  */
1446 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1447 					   struct device_attribute *attr,
1448 					   char *buf)
1449 {
1450 	struct drm_device *ddev = dev_get_drvdata(dev);
1451 	struct amdgpu_device *adev = drm_to_adev(ddev);
1452 	unsigned int value;
1453 	int r;
1454 
1455 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1456 	if (r)
1457 		return r;
1458 
1459 	return sysfs_emit(buf, "%d\n", value);
1460 }
1461 
1462 /**
1463  * DOC: mem_busy_percent
1464  *
1465  * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1466  * is as a percentage.  The file mem_busy_percent is used for this.
1467  * The SMU firmware computes a percentage of load based on the
1468  * aggregate activity level in the IP cores.
1469  */
1470 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1471 					   struct device_attribute *attr,
1472 					   char *buf)
1473 {
1474 	struct drm_device *ddev = dev_get_drvdata(dev);
1475 	struct amdgpu_device *adev = drm_to_adev(ddev);
1476 	unsigned int value;
1477 	int r;
1478 
1479 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1480 	if (r)
1481 		return r;
1482 
1483 	return sysfs_emit(buf, "%d\n", value);
1484 }
1485 
1486 /**
1487  * DOC: vcn_busy_percent
1488  *
1489  * The amdgpu driver provides a sysfs API for reading how busy the VCN
1490  * is as a percentage.  The file vcn_busy_percent is used for this.
1491  * The SMU firmware computes a percentage of load based on the
1492  * aggregate activity level in the IP cores.
1493  */
1494 static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
1495 						  struct device_attribute *attr,
1496 						  char *buf)
1497 {
1498 	struct drm_device *ddev = dev_get_drvdata(dev);
1499 	struct amdgpu_device *adev = drm_to_adev(ddev);
1500 	unsigned int value;
1501 	int r;
1502 
1503 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
1504 	if (r)
1505 		return r;
1506 
1507 	return sysfs_emit(buf, "%d\n", value);
1508 }
1509 
1510 /**
1511  * DOC: pcie_bw
1512  *
1513  * The amdgpu driver provides a sysfs API for estimating how much data
1514  * has been received and sent by the GPU in the last second through PCIe.
1515  * The file pcie_bw is used for this.
1516  * The Perf counters count the number of received and sent messages and return
1517  * those values, as well as the maximum payload size of a PCIe packet (mps).
1518  * Note that it is not possible to easily and quickly obtain the size of each
1519  * packet transmitted, so we output the max payload size (mps) to allow for
1520  * quick estimation of the PCIe bandwidth usage
1521  */
1522 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1523 		struct device_attribute *attr,
1524 		char *buf)
1525 {
1526 	struct drm_device *ddev = dev_get_drvdata(dev);
1527 	struct amdgpu_device *adev = drm_to_adev(ddev);
1528 	uint64_t count0 = 0, count1 = 0;
1529 	int ret;
1530 
1531 	if (adev->flags & AMD_IS_APU)
1532 		return -ENODATA;
1533 
1534 	if (!adev->asic_funcs->get_pcie_usage)
1535 		return -ENODATA;
1536 
1537 	ret = amdgpu_pm_get_access_if_active(adev);
1538 	if (ret)
1539 		return ret;
1540 
1541 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1542 
1543 	amdgpu_pm_put_access(adev);
1544 
1545 	return sysfs_emit(buf, "%llu %llu %i\n",
1546 			  count0, count1, pcie_get_mps(adev->pdev));
1547 }
1548 
1549 /**
1550  * DOC: unique_id
1551  *
1552  * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
1553  * The file unique_id is used for this.
1554  * This will provide a Unique ID that will persist from machine to machine
1555  *
1556  * NOTE: This will only work for GFX9 and newer. This file will be absent
1557  * on unsupported ASICs (GFX8 and older)
1558  */
1559 static ssize_t amdgpu_get_unique_id(struct device *dev,
1560 		struct device_attribute *attr,
1561 		char *buf)
1562 {
1563 	struct drm_device *ddev = dev_get_drvdata(dev);
1564 	struct amdgpu_device *adev = drm_to_adev(ddev);
1565 
1566 	if (adev->unique_id)
1567 		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  * DOC: thermal_throttling_logging
1574  *
1575  * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
1577  * impacts performance, the user controls whether it is enabled and if so,
1578  * the log frequency.
1579  *
1580  * Reading back the file shows you the status(enabled or disabled) and
1581  * the interval(in seconds) between each thermal logging.
1582  *
1583  * Writing an integer to the file, sets a new logging interval, in seconds.
1584  * The value should be between 1 and 3600. If the value is less than 1,
1585  * thermal logging is disabled. Values greater than 3600 are ignored.
1586  */
1587 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1588 						     struct device_attribute *attr,
1589 						     char *buf)
1590 {
1591 	struct drm_device *ddev = dev_get_drvdata(dev);
1592 	struct amdgpu_device *adev = drm_to_adev(ddev);
1593 
1594 	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1595 			  adev_to_drm(adev)->unique,
1596 			  str_enabled_disabled(atomic_read(&adev->throttling_logging_enabled)),
1597 			  adev->throttling_logging_rs.interval / HZ + 1);
1598 }
1599 
1600 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1601 						     struct device_attribute *attr,
1602 						     const char *buf,
1603 						     size_t count)
1604 {
1605 	struct drm_device *ddev = dev_get_drvdata(dev);
1606 	struct amdgpu_device *adev = drm_to_adev(ddev);
1607 	long throttling_logging_interval;
1608 	int ret = 0;
1609 
1610 	ret = kstrtol(buf, 0, &throttling_logging_interval);
1611 	if (ret)
1612 		return ret;
1613 
1614 	if (throttling_logging_interval > 3600)
1615 		return -EINVAL;
1616 
1617 	if (throttling_logging_interval > 0) {
1618 		/*
1619 		 * Reset the ratelimit timer internals.
1620 		 * This can effectively restart the timer.
1621 		 */
1622 		ratelimit_state_reset_interval(&adev->throttling_logging_rs,
1623 					       (throttling_logging_interval - 1) * HZ);
1624 		atomic_set(&adev->throttling_logging_enabled, 1);
1625 	} else {
1626 		atomic_set(&adev->throttling_logging_enabled, 0);
1627 	}
1628 
1629 	return count;
1630 }
1631 
1632 /**
1633  * DOC: apu_thermal_cap
1634  *
1635  * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1636  * limit temperature in millidegrees Celsius
1637  *
1638  * Reading back the file shows you core limit value
1639  *
1640  * Writing an integer to the file, sets a new thermal limit. The value
1641  * should be between 0 and 100. If the value is less than 0 or greater
1642  * than 100, then the write request will be ignored.
1643  */
1644 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1645 					 struct device_attribute *attr,
1646 					 char *buf)
1647 {
1648 	int ret, size;
1649 	u32 limit;
1650 	struct drm_device *ddev = dev_get_drvdata(dev);
1651 	struct amdgpu_device *adev = drm_to_adev(ddev);
1652 
1653 	ret = amdgpu_pm_get_access_if_active(adev);
1654 	if (ret)
1655 		return ret;
1656 
1657 	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1658 	if (!ret)
1659 		size = sysfs_emit(buf, "%u\n", limit);
1660 	else
1661 		size = sysfs_emit(buf, "failed to get thermal limit\n");
1662 
1663 	amdgpu_pm_put_access(adev);
1664 
1665 	return size;
1666 }
1667 
1668 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1669 					 struct device_attribute *attr,
1670 					 const char *buf,
1671 					 size_t count)
1672 {
1673 	int ret;
1674 	u32 value;
1675 	struct drm_device *ddev = dev_get_drvdata(dev);
1676 	struct amdgpu_device *adev = drm_to_adev(ddev);
1677 
1678 	ret = kstrtou32(buf, 10, &value);
1679 	if (ret)
1680 		return ret;
1681 
1682 	if (value > 100) {
1683 		dev_err(dev, "Invalid argument !\n");
1684 		return -EINVAL;
1685 	}
1686 
1687 	ret = amdgpu_pm_get_access(adev);
1688 	if (ret < 0)
1689 		return ret;
1690 
1691 	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1692 	if (ret) {
1693 		amdgpu_pm_put_access(adev);
1694 		dev_err(dev, "failed to update thermal limit\n");
1695 		return ret;
1696 	}
1697 
1698 	amdgpu_pm_put_access(adev);
1699 
1700 	return count;
1701 }
1702 
/*
 * Attribute visibility hook for the pm_metrics file: hide it when the
 * underlying implementation does not support the pm-metrics query
 * (probed with a NULL buffer).
 */
static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
					 struct amdgpu_device_attr *attr,
					 uint32_t mask,
					 enum amdgpu_device_attr_states *states)
{
	if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}
1713 
1714 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1715 				     struct device_attribute *attr, char *buf)
1716 {
1717 	struct drm_device *ddev = dev_get_drvdata(dev);
1718 	struct amdgpu_device *adev = drm_to_adev(ddev);
1719 	ssize_t size = 0;
1720 	int ret;
1721 
1722 	ret = amdgpu_pm_get_access_if_active(adev);
1723 	if (ret)
1724 		return ret;
1725 
1726 	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1727 
1728 	amdgpu_pm_put_access(adev);
1729 
1730 	return size;
1731 }
1732 
1733 /**
1734  * DOC: gpu_metrics
1735  *
1736  * The amdgpu driver provides a sysfs API for retrieving current gpu
1737  * metrics data. The file gpu_metrics is used for this. Reading the
1738  * file will dump all the current gpu metrics data.
1739  *
1740  * These data include temperature, frequency, engines utilization,
1741  * power consume, throttler status, fan speed and cpu core statistics(
1742  * available for APU only). That's it will give a snapshot of all sensors
1743  * at the same time.
1744  */
1745 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1746 				      struct device_attribute *attr,
1747 				      char *buf)
1748 {
1749 	struct drm_device *ddev = dev_get_drvdata(dev);
1750 	struct amdgpu_device *adev = drm_to_adev(ddev);
1751 	void *gpu_metrics;
1752 	ssize_t size = 0;
1753 	int ret;
1754 
1755 	ret = amdgpu_pm_get_access_if_active(adev);
1756 	if (ret)
1757 		return ret;
1758 
1759 	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1760 	if (size <= 0)
1761 		goto out;
1762 
1763 	if (size >= PAGE_SIZE)
1764 		size = PAGE_SIZE - 1;
1765 
1766 	memcpy(buf, gpu_metrics, size);
1767 
1768 out:
1769 	amdgpu_pm_put_access(adev);
1770 
1771 	return size;
1772 }
1773 
1774 static int amdgpu_show_powershift_percent(struct device *dev,
1775 					char *buf, enum amd_pp_sensors sensor)
1776 {
1777 	struct drm_device *ddev = dev_get_drvdata(dev);
1778 	struct amdgpu_device *adev = drm_to_adev(ddev);
1779 	uint32_t ss_power;
1780 	int r = 0, i;
1781 
1782 	r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1783 	if (r == -EOPNOTSUPP) {
1784 		/* sensor not available on dGPU, try to read from APU */
1785 		adev = NULL;
1786 		mutex_lock(&mgpu_info.mutex);
1787 		for (i = 0; i < mgpu_info.num_gpu; i++) {
1788 			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1789 				adev = mgpu_info.gpu_ins[i].adev;
1790 				break;
1791 			}
1792 		}
1793 		mutex_unlock(&mgpu_info.mutex);
1794 		if (adev)
1795 			r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1796 	}
1797 
1798 	if (r)
1799 		return r;
1800 
1801 	return sysfs_emit(buf, "%u%%\n", ss_power);
1802 }
1803 
1804 /**
1805  * DOC: smartshift_apu_power
1806  *
1807  * The amdgpu driver provides a sysfs API for reporting APU power
1808  * shift in percentage if platform supports smartshift. Value 0 means that
1809  * there is no powershift and values between [1-100] means that the power
1810  * is shifted to APU, the percentage of boost is with respect to APU power
1811  * limit on the platform.
1812  */
1813 
/* sysfs "smartshift_apu_power" show: report the APU power-share percentage. */
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
					       char *buf)
{
	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
}
1819 
1820 /**
1821  * DOC: smartshift_dgpu_power
1822  *
1823  * The amdgpu driver provides a sysfs API for reporting dGPU power
1824  * shift in percentage if platform supports smartshift. Value 0 means that
1825  * there is no powershift and values between [1-100] means that the power is
1826  * shifted to dGPU, the percentage of boost is with respect to dGPU power
1827  * limit on the platform.
1828  */
1829 
/* sysfs "smartshift_dgpu_power" show: report the dGPU power-share percentage. */
static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
						char *buf)
{
	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
}
1835 
1836 /**
1837  * DOC: smartshift_bias
1838  *
1839  * The amdgpu driver provides a sysfs API for reporting the
1840  * smartshift(SS2.0) bias level. The value ranges from -100 to 100
1841  * and the default is 0. -100 sets maximum preference to APU
 * and 100 sets max preference to dGPU.
1843  */
1844 
1845 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1846 					  struct device_attribute *attr,
1847 					  char *buf)
1848 {
1849 	int r = 0;
1850 
1851 	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1852 
1853 	return r;
1854 }
1855 
1856 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1857 					  struct device_attribute *attr,
1858 					  const char *buf, size_t count)
1859 {
1860 	struct drm_device *ddev = dev_get_drvdata(dev);
1861 	struct amdgpu_device *adev = drm_to_adev(ddev);
1862 	int r = 0;
1863 	int bias = 0;
1864 
1865 	r = kstrtoint(buf, 10, &bias);
1866 	if (r)
1867 		goto out;
1868 
1869 	r = amdgpu_pm_get_access(adev);
1870 	if (r < 0)
1871 		return r;
1872 
1873 	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1874 		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1875 	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1876 		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1877 
1878 	amdgpu_smartshift_bias = bias;
1879 	r = count;
1880 
1881 	/* TODO: update bias level with SMU message */
1882 
1883 out:
1884 	amdgpu_pm_put_access(adev);
1885 
1886 	return r;
1887 }
1888 
/*
 * Attribute visibility hook: hide the smartshift power-share files on
 * platforms without smartshift support.
 */
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				uint32_t mask, enum amdgpu_device_attr_states *states)
{
	if (!amdgpu_device_supports_smart_shift(adev))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}
1897 
/*
 * Attribute visibility hook: expose smartshift_bias only when the
 * platform supports smartshift and both the APU and dGPU power-share
 * sensors are actually readable.
 */
static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power;

	if (!amdgpu_device_supports_smart_shift(adev))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
					      (void *)&ss_power))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
					      (void *)&ss_power))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}
1914 
/*
 * Attribute visibility hook for pp_od_clk_voltage: hide the file when
 * overdrive is unsupported, and handle the multi-AID special cases.
 */
static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
					 uint32_t mask, enum amdgpu_device_attr_states *states)
{
	*states = ATTR_STATE_SUPPORTED;

	if (!amdgpu_dpm_is_overdrive_supported(adev)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	/* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0, 12.1.0 SRIOV/BM support */
	if (amdgpu_is_multi_aid(adev)) {
		if (amdgpu_sriov_multi_vf_mode(adev))
			*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (!(attr->flags & mask))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}
1937 
/*
 * Attribute visibility hook for pp_dpm_dcefclk: the file only exists on
 * ASICs with a display controller clock domain, and the store side is
 * disabled where the SMU cannot honor level setting.
 */
static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				      uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	uint32_t gc_ver;

	*states = ATTR_STATE_SUPPORTED;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	/* dcefclk node is not available on gfx 11.0.3 sriov */
	if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
	    gc_ver < IP_VERSION(9, 0, 0) ||
	    !amdgpu_device_has_display_hardware(adev))
		*states = ATTR_STATE_UNSUPPORTED;

	/* SMU MP1 does not support dcefclk level setting,
	 * setting should not be allowed from VF if not in one VF mode.
	 */
	if (gc_ver >= IP_VERSION(10, 0, 0) ||
	    (amdgpu_sriov_multi_vf_mode(adev))) {
		/* make the node read-only: clear write perms and the store hook */
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

	return 0;
}
1969 
/*
 * Default attribute visibility hook for the pp_dpm_* clock files: mark a
 * file unsupported when the GC/MP1 IP version indicates the ASIC lacks
 * that clock domain, and make files read-only where the firmware does
 * not accept level setting.
 */
static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
					  uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	enum amdgpu_device_attr_id attr_id = attr->attr_id;
	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	*states = ATTR_STATE_SUPPORTED;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (mp1_ver < IP_VERSION(10, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		/* vclk is only exposed on the listed GC versions */
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      amdgpu_is_multi_aid(adev)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
		/* vclk1 additionally requires a second VCN instance */
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      amdgpu_is_multi_aid(adev)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
		/* dclk1 additionally requires a second VCN instance */
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (gc_ver == IP_VERSION(9, 4, 2) ||
		    amdgpu_is_multi_aid(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	}

	switch (gc_ver) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	/* setting should not be allowed from VF if not in one VF mode */
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

	return 0;
}
2062 
2063 /**
2064  * DOC: board
2065  *
2066  * Certain SOCs can support various board attributes reporting. This is useful
 * for user applications to monitor various board related attributes.
2068  *
2069  * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
2070  * nine types of attributes are reported. Baseboard temperature and
 * gpu board temperature are reported as binary files. Npm status, current node power limit,
 * max node power limit, node power, global ppt residency, baseboard_power, baseboard_power_limit
 * are reported as ASCII text files.
2074  *
 * .. code-block:: console
2076  *
2077  *      hexdump /sys/bus/pci/devices/.../board/baseboard_temp
2078  *
2079  *      hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
2080  *
2081  *      hexdump /sys/bus/pci/devices/.../board/npm_status
2082  *
2083  *      hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
2084  *
2085  *      hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
2086  *
2087  *      hexdump /sys/bus/pci/devices/.../board/node_power
2088  *
2089  *      hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
2090  *
2091  *      hexdump /sys/bus/pci/devices/.../board/baseboard_power
2092  *
2093  *      hexdump /sys/bus/pci/devices/.../board/baseboard_power_limit
2094  */
2095 
2096 /**
2097  * DOC: baseboard_temp
2098  *
2099  * The amdgpu driver provides a sysfs API for retrieving current baseboard
2100  * temperature metrics data. The file baseboard_temp is used for this.
 * Reading the file will dump all the current baseboard temperature metrics data.
2102  */
2103 static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
2104 						 struct device_attribute *attr, char *buf)
2105 {
2106 	struct drm_device *ddev = dev_get_drvdata(dev);
2107 	struct amdgpu_device *adev = drm_to_adev(ddev);
2108 	ssize_t size;
2109 	int ret;
2110 
2111 	ret = amdgpu_pm_get_access_if_active(adev);
2112 	if (ret)
2113 		return ret;
2114 
2115 	size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
2116 	if (size <= 0)
2117 		goto out;
2118 	if (size >= PAGE_SIZE) {
2119 		ret = -ENOSPC;
2120 		goto out;
2121 	}
2122 
2123 	amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
2124 
2125 out:
2126 	amdgpu_pm_put_access(adev);
2127 
2128 	if (ret)
2129 		return ret;
2130 
2131 	return size;
2132 }
2133 
2134 /**
2135  * DOC: gpuboard_temp
2136  *
2137  * The amdgpu driver provides a sysfs API for retrieving current gpuboard
2138  * temperature metrics data. The file gpuboard_temp is used for this.
 * Reading the file will dump all the current gpuboard temperature metrics data.
2140  */
2141 static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
2142 						struct device_attribute *attr, char *buf)
2143 {
2144 	struct drm_device *ddev = dev_get_drvdata(dev);
2145 	struct amdgpu_device *adev = drm_to_adev(ddev);
2146 	ssize_t size;
2147 	int ret;
2148 
2149 	ret = amdgpu_pm_get_access_if_active(adev);
2150 	if (ret)
2151 		return ret;
2152 
2153 	size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
2154 	if (size <= 0)
2155 		goto out;
2156 	if (size >= PAGE_SIZE) {
2157 		ret = -ENOSPC;
2158 		goto out;
2159 	}
2160 
2161 	amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
2162 
2163 out:
2164 	amdgpu_pm_put_access(adev);
2165 
2166 	if (ret)
2167 		return ret;
2168 
2169 	return size;
2170 }
2171 
2172 /**
2173  * DOC: cur_node_power_limit
2174  *
2175  * The amdgpu driver provides a sysfs API for retrieving current node power limit.
2176  * The file cur_node_power_limit is used for this.
2177  */
2178 static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
2179 						struct device_attribute *attr, char *buf)
2180 {
2181 	struct drm_device *ddev = dev_get_drvdata(dev);
2182 	struct amdgpu_device *adev = drm_to_adev(ddev);
2183 	u32 nplimit;
2184 	int r;
2185 
2186 	/* get the current node power limit */
2187 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
2188 					 (void *)&nplimit);
2189 	if (r)
2190 		return r;
2191 
2192 	return sysfs_emit(buf, "%u\n", nplimit);
2193 }
2194 
2195 /**
2196  * DOC: node_power
2197  *
2198  * The amdgpu driver provides a sysfs API for retrieving current node power.
2199  * The file node_power is used for this.
2200  */
2201 static ssize_t amdgpu_show_node_power(struct device *dev,
2202 				      struct device_attribute *attr, char *buf)
2203 {
2204 	struct drm_device *ddev = dev_get_drvdata(dev);
2205 	struct amdgpu_device *adev = drm_to_adev(ddev);
2206 	u32 npower;
2207 	int r;
2208 
2209 	/* get the node power */
2210 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2211 					 (void *)&npower);
2212 	if (r)
2213 		return r;
2214 
2215 	return sysfs_emit(buf, "%u\n", npower);
2216 }
2217 
2218 /**
2219  * DOC: npm_status
2220  *
2221  * The amdgpu driver provides a sysfs API for retrieving current node power management status.
2222  * The file npm_status is used for this. It shows the status as enabled or disabled based on
2223  * current node power value. If node power is zero, status is disabled else enabled.
2224  */
2225 static ssize_t amdgpu_show_npm_status(struct device *dev,
2226 				      struct device_attribute *attr, char *buf)
2227 {
2228 	struct drm_device *ddev = dev_get_drvdata(dev);
2229 	struct amdgpu_device *adev = drm_to_adev(ddev);
2230 	u32 npower;
2231 	int r;
2232 
2233 	/* get the node power */
2234 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2235 					 (void *)&npower);
2236 	if (r)
2237 		return r;
2238 
2239 	return sysfs_emit(buf, "%s\n", str_enabled_disabled(npower));
2240 }
2241 
2242 /**
2243  * DOC: global_ppt_resid
2244  *
2245  * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
2246  * The file global_ppt_resid is used for this.
2247  */
2248 static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
2249 					    struct device_attribute *attr, char *buf)
2250 {
2251 	struct drm_device *ddev = dev_get_drvdata(dev);
2252 	struct amdgpu_device *adev = drm_to_adev(ddev);
2253 	u32 gpptresid;
2254 	int r;
2255 
2256 	/* get the global ppt residency */
2257 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
2258 					 (void *)&gpptresid);
2259 	if (r)
2260 		return r;
2261 
2262 	return sysfs_emit(buf, "%u\n", gpptresid);
2263 }
2264 
2265 /**
2266  * DOC: max_node_power_limit
2267  *
2268  * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
2269  * The file max_node_power_limit is used for this.
2270  */
2271 static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
2272 						struct device_attribute *attr, char *buf)
2273 {
2274 	struct drm_device *ddev = dev_get_drvdata(dev);
2275 	struct amdgpu_device *adev = drm_to_adev(ddev);
2276 	u32 max_nplimit;
2277 	int r;
2278 
2279 	/* get the max node power limit */
2280 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
2281 					 (void *)&max_nplimit);
2282 	if (r)
2283 		return r;
2284 
2285 	return sysfs_emit(buf, "%u\n", max_nplimit);
2286 }
2287 
2288 /**
2289  * DOC: baseboard_power
2290  *
2291  * The amdgpu driver provides a sysfs API for retrieving current ubb power in watts.
2292  * The file baseboard_power is used for this.
2293  */
2294 static ssize_t amdgpu_show_baseboard_power(struct device *dev,
2295 					   struct device_attribute *attr, char *buf)
2296 {
2297 	struct drm_device *ddev = dev_get_drvdata(dev);
2298 	struct amdgpu_device *adev = drm_to_adev(ddev);
2299 	u32 ubbpower;
2300 	int r;
2301 
2302 	/* get the ubb power */
2303 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER,
2304 					 (void *)&ubbpower);
2305 	if (r)
2306 		return r;
2307 
2308 	return sysfs_emit(buf, "%u\n", ubbpower);
2309 }
2310 
2311 /**
2312  * DOC: baseboard_power_limit
2313  *
2314  * The amdgpu driver provides a sysfs API for retrieving threshold ubb power in watts.
2315  * The file baseboard_power_limit is used for this.
2316  */
2317 static ssize_t amdgpu_show_baseboard_power_limit(struct device *dev,
2318 						 struct device_attribute *attr, char *buf)
2319 {
2320 	struct drm_device *ddev = dev_get_drvdata(dev);
2321 	struct amdgpu_device *adev = drm_to_adev(ddev);
2322 	u32 ubbpowerlimit;
2323 	int r;
2324 
2325 	/* get the ubb power limit */
2326 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
2327 					 (void *)&ubbpowerlimit);
2328 	if (r)
2329 		return r;
2330 
2331 	return sysfs_emit(buf, "%u\n", ubbpowerlimit);
2332 }
2333 
/* Read-only sysfs nodes for board telemetry. Only baseboard_temp and
 * gpuboard_temp are placed in the "board" group below; the remaining
 * attributes are registered elsewhere.
 */
static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
static DEVICE_ATTR(baseboard_power, 0444, amdgpu_show_baseboard_power, NULL);
static DEVICE_ATTR(baseboard_power_limit, 0444, amdgpu_show_baseboard_power_limit, NULL);
2343 
/* Members of the "board" attribute group; per-attribute visibility is
 * decided at registration time by amdgpu_board_attr_visible().
 */
static struct attribute *board_attrs[] = {
	&dev_attr_baseboard_temp.attr,
	&dev_attr_gpuboard_temp.attr,
	NULL
};
2349 
2350 static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
2351 {
2352 	struct device *dev = kobj_to_dev(kobj);
2353 	struct drm_device *ddev = dev_get_drvdata(dev);
2354 	struct amdgpu_device *adev = drm_to_adev(ddev);
2355 
2356 	if (attr == &dev_attr_baseboard_temp.attr) {
2357 		if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
2358 			return 0;
2359 	}
2360 
2361 	if (attr == &dev_attr_gpuboard_temp.attr) {
2362 		if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
2363 			return 0;
2364 	}
2365 
2366 	return attr->mode;
2367 }
2368 
/* sysfs group exposing board telemetry under .../board/ */
const struct attribute_group amdgpu_board_attr_group = {
	.name = "board",
	.attrs = board_attrs,
	.is_visible = amdgpu_board_attr_visible,
};
2374 
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
	/* sysfs show/store glue for one policy node */
	struct device_attribute dev_attr;
	/* which PP policy this node selects (e.g. soc pstate, XGMI PLPD) */
	enum pp_pm_policy id;
};
2380 
2381 /**
2382  * DOC: pm_policy
2383  *
2384  * Certain SOCs can support different power policies to optimize application
2385  * performance. However, this policy is provided only at SOC level and not at a
2386  * per-process level. This is useful especially when entire SOC is utilized for
2387  * dedicated workload.
2388  *
2389  * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2390  * only two types of policies are supported through this interface.
2391  *
2392  *  Pstate Policy Selection - This is to select different Pstate profiles which
2393  *  decides clock/throttling preferences.
2394  *
2395  *  XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2396  *  this helps to select policy to be applied for per link power down.
2397  *
2398  * The list of available policies and policy levels vary between SOCs. They can
2399  * be viewed under pm_policy node directory. If SOC doesn't support any policy,
2400  * this node won't be available. The different policies supported will be
2401  * available as separate nodes under pm_policy.
2402  *
2403  *	cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2404  *
2405  * Reading the policy file shows the different levels supported. The level which
2406  * is applied presently is denoted by * (asterisk). E.g.,
2407  *
2408  * .. code-block:: console
2409  *
2410  *	cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2411  *	0 : soc_pstate_default
2412  *	1 : soc_pstate_0
2413  *	2 : soc_pstate_1*
2414  *	3 : soc_pstate_2
2415  *
2416  *	cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2417  *	0 : plpd_disallow
2418  *	1 : plpd_default
2419  *	2 : plpd_optimized*
2420  *
2421  * To apply a specific policy
2422  *
2423  * "echo  <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2424  *
2425  * For the levels listed in the example above, to select "plpd_optimized" for
2426  * XGMI and "soc_pstate_2" for soc pstate policy -
2427  *
2428  * .. code-block:: console
2429  *
2430  *	echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2431  *	echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2432  *
2433  */
2434 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2435 					 struct device_attribute *attr,
2436 					 char *buf)
2437 {
2438 	struct drm_device *ddev = dev_get_drvdata(dev);
2439 	struct amdgpu_device *adev = drm_to_adev(ddev);
2440 	struct amdgpu_pm_policy_attr *policy_attr;
2441 
2442 	policy_attr =
2443 		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2444 
2445 	return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2446 }
2447 
/* Store handler for a pm_policy node: parses exactly one integer level
 * from the user buffer and applies it via the DPM layer.
 */
static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *policy_attr;
	int ret, num_params = 0;
	char delimiter[] = " \n\t";
	char tmp_buf[128];
	char *tmp, *param;
	long val;

	/* Work on a bounded, NUL-terminated copy of the user buffer.
	 * NOTE(review): input longer than 128 bytes is silently truncated.
	 */
	count = min(count, sizeof(tmp_buf));
	memcpy(tmp_buf, buf, count);
	tmp_buf[count - 1] = '\0';
	tmp = tmp_buf;

	tmp = skip_spaces(tmp);
	/* Accept exactly one integer token; any extra token or a
	 * non-numeric one rejects the whole write.
	 */
	while ((param = strsep(&tmp, delimiter))) {
		if (!strlen(param)) {
			tmp = skip_spaces(tmp);
			continue;
		}
		ret = kstrtol(param, 0, &val);
		if (ret)
			return -EINVAL;
		num_params++;
		if (num_params > 1)
			return -EINVAL;
	}

	if (num_params != 1)
		return -EINVAL;

	policy_attr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	return count;
}
2499 
/* Declare one pm_policy sysfs node (mode 0644) bound to PP policy _id. */
#define AMDGPU_PM_POLICY_ATTR(_name, _id)                                  \
	static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {     \
		.dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
				   amdgpu_set_pm_policy_attr),             \
		.id = PP_PM_POLICY_##_id,                                  \
	};

/* Shorthand for the embedded struct attribute of a policy node. */
#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr

AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)

/* Members of the "pm_policy" group; visibility is filtered per SOC by
 * amdgpu_pm_policy_attr_visible().
 */
static struct attribute *pm_policy_attrs[] = {
	&AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
	&AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
	NULL
};
2517 
2518 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2519 					     struct attribute *attr, int n)
2520 {
2521 	struct device *dev = kobj_to_dev(kobj);
2522 	struct drm_device *ddev = dev_get_drvdata(dev);
2523 	struct amdgpu_device *adev = drm_to_adev(ddev);
2524 	struct amdgpu_pm_policy_attr *policy_attr;
2525 
2526 	policy_attr =
2527 		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2528 
2529 	if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2530 	    -ENOENT)
2531 		return 0;
2532 
2533 	return attr->mode;
2534 }
2535 
/* sysfs group exposing SOC power-policy selection under .../pm_policy/ */
const struct attribute_group amdgpu_pm_policy_attr_group = {
	.name = "pm_policy",
	.attrs = pm_policy_attrs,
	.is_visible = amdgpu_pm_policy_attr_visible,
};
2541 
/* Table of per-device PM sysfs attributes. Each entry may provide a
 * custom .attr_update hook deciding per-ASIC support/permissions;
 * entries without one fall back to default_attr_update().
 */
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_dcefclk_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC,
			      .attr_update = pp_od_clk_voltage_attr_update),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,			ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,			ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
	AMDGPU_DEVICE_ATTR_RO(pm_metrics,				ATTR_FLAG_BASIC,
			      .attr_update = amdgpu_pm_metrics_attr_update),
};
2592 
/* Default per-attribute support check: decides whether an attribute is
 * exposed (and with what permissions) for this ASIC, based mainly on the
 * GC IP version and APU/SR-IOV status. Returns 0; the decision is passed
 * back through *states.
 */
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	enum amdgpu_device_attr_id attr_id = attr->attr_id;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	/* Attribute not selected by the caller's flag mask at all. */
	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver == IP_VERSION(9, 0, 1))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
		/* Allowlist of GC versions that report VCN activity. */
		if (!(gc_ver == IP_VERSION(9, 3, 0) ||
		      gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 5, 1) ||
		      gc_ver == IP_VERSION(11, 5, 2) ||
		      gc_ver == IP_VERSION(11, 5, 3) ||
		      gc_ver == IP_VERSION(12, 0, 0) ||
		      gc_ver == IP_VERSION(12, 0, 1)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU ||
		    !adev->asic_funcs->get_pcie_usage)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		switch (gc_ver) {
		case IP_VERSION(9, 0, 1):
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
		case IP_VERSION(9, 5, 0):
		case IP_VERSION(10, 3, 0):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 2):
		case IP_VERSION(11, 0, 3):
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			*states = ATTR_STATE_SUPPORTED;
			break;
		default:
			*states = ATTR_STATE_UNSUPPORTED;
		}
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (gc_ver < IP_VERSION(9, 1, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
		/* Probe support by issuing a NULL query. */
		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
		else if ((gc_ver == IP_VERSION(10, 3, 0) ||
			  gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
		if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_sclk_od)) {
		if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
		u32 limit;

		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
		    -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_table)) {
		int ret;
		char *tmp = NULL;

		ret = amdgpu_dpm_get_pp_table(adev, &tmp);
		if (ret == -EOPNOTSUPP || !tmp)
			*states = ATTR_STATE_UNSUPPORTED;
		else
			*states = ATTR_STATE_SUPPORTED;
	}

	switch (gc_ver) {
	case IP_VERSION(10, 3, 0):
		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
		    amdgpu_sriov_vf(adev)) {
			/* strip the write bits (0222 == S_IWUGO) for VFs */
			dev_attr->attr.mode &= ~0222;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	return 0;
}
2706 
2707 
2708 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2709 				     struct amdgpu_device_attr *attr,
2710 				     uint32_t mask, struct list_head *attr_list)
2711 {
2712 	int ret = 0;
2713 	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2714 	struct amdgpu_device_attr_entry *attr_entry;
2715 	struct device_attribute *dev_attr;
2716 	const char *name;
2717 
2718 	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2719 			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2720 
2721 	if (!attr)
2722 		return -EINVAL;
2723 
2724 	dev_attr = &attr->dev_attr;
2725 	name = dev_attr->attr.name;
2726 
2727 	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2728 
2729 	ret = attr_update(adev, attr, mask, &attr_states);
2730 	if (ret) {
2731 		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2732 			name, ret);
2733 		return ret;
2734 	}
2735 
2736 	if (attr_states == ATTR_STATE_UNSUPPORTED)
2737 		return 0;
2738 
2739 	ret = device_create_file(adev->dev, dev_attr);
2740 	if (ret) {
2741 		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2742 			name, ret);
2743 	}
2744 
2745 	attr_entry = kmalloc_obj(*attr_entry);
2746 	if (!attr_entry)
2747 		return -ENOMEM;
2748 
2749 	attr_entry->attr = attr;
2750 	INIT_LIST_HEAD(&attr_entry->entry);
2751 
2752 	list_add_tail(&attr_entry->entry, attr_list);
2753 
2754 	return ret;
2755 }
2756 
2757 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2758 {
2759 	struct device_attribute *dev_attr = &attr->dev_attr;
2760 
2761 	device_remove_file(adev->dev, dev_attr);
2762 }
2763 
2764 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2765 					     struct list_head *attr_list);
2766 
2767 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2768 					    struct amdgpu_device_attr *attrs,
2769 					    uint32_t counts,
2770 					    uint32_t mask,
2771 					    struct list_head *attr_list)
2772 {
2773 	int ret = 0;
2774 	uint32_t i = 0;
2775 
2776 	for (i = 0; i < counts; i++) {
2777 		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2778 		if (ret)
2779 			goto failed;
2780 	}
2781 
2782 	return 0;
2783 
2784 failed:
2785 	amdgpu_device_attr_remove_groups(adev, attr_list);
2786 
2787 	return ret;
2788 }
2789 
2790 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2791 					     struct list_head *attr_list)
2792 {
2793 	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2794 
2795 	if (list_empty(attr_list))
2796 		return ;
2797 
2798 	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2799 		amdgpu_device_attr_remove(adev, entry->attr);
2800 		list_del(&entry->entry);
2801 		kfree(entry);
2802 	}
2803 }
2804 
2805 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2806 				      struct device_attribute *attr,
2807 				      char *buf)
2808 {
2809 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2810 	int channel = to_sensor_dev_attr(attr)->index;
2811 	int r, temp = 0;
2812 
2813 	if (channel >= PP_TEMP_MAX)
2814 		return -EINVAL;
2815 
2816 	switch (channel) {
2817 	case PP_TEMP_JUNCTION:
2818 		/* get current junction temperature */
2819 		r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2820 						 (void *)&temp);
2821 		break;
2822 	case PP_TEMP_EDGE:
2823 		/* get current edge temperature */
2824 		r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2825 						 (void *)&temp);
2826 		break;
2827 	case PP_TEMP_MEM:
2828 		/* get current memory temperature */
2829 		r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2830 						 (void *)&temp);
2831 		break;
2832 	default:
2833 		r = -EINVAL;
2834 		break;
2835 	}
2836 
2837 	if (r)
2838 		return r;
2839 
2840 	return sysfs_emit(buf, "%d\n", temp);
2841 }
2842 
2843 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2844 					     struct device_attribute *attr,
2845 					     char *buf)
2846 {
2847 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2848 	int hyst = to_sensor_dev_attr(attr)->index;
2849 	int temp;
2850 
2851 	if (hyst)
2852 		temp = adev->pm.dpm.thermal.min_temp;
2853 	else
2854 		temp = adev->pm.dpm.thermal.max_temp;
2855 
2856 	return sysfs_emit(buf, "%d\n", temp);
2857 }
2858 
2859 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2860 					     struct device_attribute *attr,
2861 					     char *buf)
2862 {
2863 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2864 	int hyst = to_sensor_dev_attr(attr)->index;
2865 	int temp;
2866 
2867 	if (hyst)
2868 		temp = adev->pm.dpm.thermal.min_hotspot_temp;
2869 	else
2870 		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2871 
2872 	return sysfs_emit(buf, "%d\n", temp);
2873 }
2874 
2875 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2876 					     struct device_attribute *attr,
2877 					     char *buf)
2878 {
2879 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2880 	int hyst = to_sensor_dev_attr(attr)->index;
2881 	int temp;
2882 
2883 	if (hyst)
2884 		temp = adev->pm.dpm.thermal.min_mem_temp;
2885 	else
2886 		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2887 
2888 	return sysfs_emit(buf, "%d\n", temp);
2889 }
2890 
2891 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2892 					     struct device_attribute *attr,
2893 					     char *buf)
2894 {
2895 	int channel = to_sensor_dev_attr(attr)->index;
2896 
2897 	if (channel >= PP_TEMP_MAX)
2898 		return -EINVAL;
2899 
2900 	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2901 }
2902 
2903 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2904 					     struct device_attribute *attr,
2905 					     char *buf)
2906 {
2907 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2908 	int channel = to_sensor_dev_attr(attr)->index;
2909 	int temp = 0;
2910 
2911 	if (channel >= PP_TEMP_MAX)
2912 		return -EINVAL;
2913 
2914 	switch (channel) {
2915 	case PP_TEMP_JUNCTION:
2916 		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2917 		break;
2918 	case PP_TEMP_EDGE:
2919 		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2920 		break;
2921 	case PP_TEMP_MEM:
2922 		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2923 		break;
2924 	}
2925 
2926 	return sysfs_emit(buf, "%d\n", temp);
2927 }
2928 
2929 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2930 					    struct device_attribute *attr,
2931 					    char *buf)
2932 {
2933 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2934 	u32 pwm_mode = 0;
2935 	int ret;
2936 
2937 	ret = amdgpu_pm_get_access_if_active(adev);
2938 	if (ret)
2939 		return ret;
2940 
2941 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2942 
2943 	amdgpu_pm_put_access(adev);
2944 
2945 	if (ret)
2946 		return -EINVAL;
2947 
2948 	return sysfs_emit(buf, "%u\n", pwm_mode);
2949 }
2950 
2951 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2952 					    struct device_attribute *attr,
2953 					    const char *buf,
2954 					    size_t count)
2955 {
2956 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2957 	int err, ret;
2958 	u32 pwm_mode;
2959 	int value;
2960 
2961 	err = kstrtoint(buf, 10, &value);
2962 	if (err)
2963 		return err;
2964 
2965 	if (value == 0)
2966 		pwm_mode = AMD_FAN_CTRL_NONE;
2967 	else if (value == 1)
2968 		pwm_mode = AMD_FAN_CTRL_MANUAL;
2969 	else if (value == 2)
2970 		pwm_mode = AMD_FAN_CTRL_AUTO;
2971 	else
2972 		return -EINVAL;
2973 
2974 	ret = amdgpu_pm_get_access(adev);
2975 	if (ret < 0)
2976 		return ret;
2977 
2978 	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2979 
2980 	amdgpu_pm_put_access(adev);
2981 
2982 	if (ret)
2983 		return -EINVAL;
2984 
2985 	return count;
2986 }
2987 
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/* Lower bound of the hwmon PWM duty-cycle range. */
	return sysfs_emit(buf, "%i\n", 0);
}
2994 
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/* Upper bound of the hwmon PWM duty-cycle range. */
	return sysfs_emit(buf, "%i\n", 255);
}
3001 
3002 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
3003 				     struct device_attribute *attr,
3004 				     const char *buf, size_t count)
3005 {
3006 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3007 	int err;
3008 	u32 value;
3009 	u32 pwm_mode;
3010 
3011 	err = kstrtou32(buf, 10, &value);
3012 	if (err)
3013 		return err;
3014 
3015 	err = amdgpu_pm_get_access(adev);
3016 	if (err < 0)
3017 		return err;
3018 
3019 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3020 	if (err)
3021 		goto out;
3022 
3023 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3024 		pr_info("manual fan speed control should be enabled first\n");
3025 		err = -EINVAL;
3026 		goto out;
3027 	}
3028 
3029 	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
3030 
3031 out:
3032 	amdgpu_pm_put_access(adev);
3033 
3034 	if (err)
3035 		return err;
3036 
3037 	return count;
3038 }
3039 
3040 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
3041 				     struct device_attribute *attr,
3042 				     char *buf)
3043 {
3044 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3045 	int err;
3046 	u32 speed = 0;
3047 
3048 	err = amdgpu_pm_get_access_if_active(adev);
3049 	if (err)
3050 		return err;
3051 
3052 	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
3053 
3054 	amdgpu_pm_put_access(adev);
3055 
3056 	if (err)
3057 		return err;
3058 
3059 	return sysfs_emit(buf, "%i\n", speed);
3060 }
3061 
3062 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
3063 					   struct device_attribute *attr,
3064 					   char *buf)
3065 {
3066 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3067 	int err;
3068 	u32 speed = 0;
3069 
3070 	err = amdgpu_pm_get_access_if_active(adev);
3071 	if (err)
3072 		return err;
3073 
3074 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
3075 
3076 	amdgpu_pm_put_access(adev);
3077 
3078 	if (err)
3079 		return err;
3080 
3081 	return sysfs_emit(buf, "%i\n", speed);
3082 }
3083 
3084 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
3085 					 struct device_attribute *attr,
3086 					 char *buf)
3087 {
3088 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3089 	u32 min_rpm = 0;
3090 	int r;
3091 
3092 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
3093 					 (void *)&min_rpm);
3094 
3095 	if (r)
3096 		return r;
3097 
3098 	return sysfs_emit(buf, "%d\n", min_rpm);
3099 }
3100 
3101 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
3102 					 struct device_attribute *attr,
3103 					 char *buf)
3104 {
3105 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3106 	u32 max_rpm = 0;
3107 	int r;
3108 
3109 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
3110 					 (void *)&max_rpm);
3111 
3112 	if (r)
3113 		return r;
3114 
3115 	return sysfs_emit(buf, "%d\n", max_rpm);
3116 }
3117 
3118 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
3119 					   struct device_attribute *attr,
3120 					   char *buf)
3121 {
3122 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3123 	int err;
3124 	u32 rpm = 0;
3125 
3126 	err = amdgpu_pm_get_access_if_active(adev);
3127 	if (err)
3128 		return err;
3129 
3130 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
3131 
3132 	amdgpu_pm_put_access(adev);
3133 
3134 	if (err)
3135 		return err;
3136 
3137 	return sysfs_emit(buf, "%i\n", rpm);
3138 }
3139 
3140 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
3141 				     struct device_attribute *attr,
3142 				     const char *buf, size_t count)
3143 {
3144 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3145 	int err;
3146 	u32 value;
3147 	u32 pwm_mode;
3148 
3149 	err = kstrtou32(buf, 10, &value);
3150 	if (err)
3151 		return err;
3152 
3153 	err = amdgpu_pm_get_access(adev);
3154 	if (err < 0)
3155 		return err;
3156 
3157 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3158 	if (err)
3159 		goto out;
3160 
3161 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3162 		err = -ENODATA;
3163 		goto out;
3164 	}
3165 
3166 	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
3167 
3168 out:
3169 	amdgpu_pm_put_access(adev);
3170 
3171 	if (err)
3172 		return err;
3173 
3174 	return count;
3175 }
3176 
3177 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
3178 					    struct device_attribute *attr,
3179 					    char *buf)
3180 {
3181 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3182 	u32 pwm_mode = 0;
3183 	int ret;
3184 
3185 	ret = amdgpu_pm_get_access_if_active(adev);
3186 	if (ret)
3187 		return ret;
3188 
3189 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3190 
3191 	amdgpu_pm_put_access(adev);
3192 
3193 	if (ret)
3194 		return -EINVAL;
3195 
3196 	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3197 }
3198 
3199 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3200 					    struct device_attribute *attr,
3201 					    const char *buf,
3202 					    size_t count)
3203 {
3204 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3205 	int err;
3206 	int value;
3207 	u32 pwm_mode;
3208 
3209 	err = kstrtoint(buf, 10, &value);
3210 	if (err)
3211 		return err;
3212 
3213 	if (value == 0)
3214 		pwm_mode = AMD_FAN_CTRL_AUTO;
3215 	else if (value == 1)
3216 		pwm_mode = AMD_FAN_CTRL_MANUAL;
3217 	else
3218 		return -EINVAL;
3219 
3220 	err = amdgpu_pm_get_access(adev);
3221 	if (err < 0)
3222 		return err;
3223 
3224 	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3225 
3226 	amdgpu_pm_put_access(adev);
3227 
3228 	if (err)
3229 		return -EINVAL;
3230 
3231 	return count;
3232 }
3233 
3234 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3235 					struct device_attribute *attr,
3236 					char *buf)
3237 {
3238 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3239 	u32 vddgfx;
3240 	int r;
3241 
3242 	/* get the voltage */
3243 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3244 					 (void *)&vddgfx);
3245 	if (r)
3246 		return r;
3247 
3248 	return sysfs_emit(buf, "%d\n", vddgfx);
3249 }
3250 
3251 static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
3252 					  struct device_attribute *attr,
3253 					  char *buf)
3254 {
3255 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3256 	u32 vddboard;
3257 	int r;
3258 
3259 	/* get the voltage */
3260 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3261 					 (void *)&vddboard);
3262 	if (r)
3263 		return r;
3264 
3265 	return sysfs_emit(buf, "%d\n", vddboard);
3266 }
3267 
/* in0_label - constant channel label for the GPU core voltage sensor. */
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return sysfs_emit(buf, "vddgfx\n");
}
3274 
/* in2_label - constant channel label for the board voltage sensor. */
static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	return sysfs_emit(buf, "vddboard\n");
}
3281 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3282 				       struct device_attribute *attr,
3283 				       char *buf)
3284 {
3285 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3286 	u32 vddnb;
3287 	int r;
3288 
3289 	/* only APUs have vddnb */
3290 	if  (!(adev->flags & AMD_IS_APU))
3291 		return -EINVAL;
3292 
3293 	/* get the voltage */
3294 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3295 					 (void *)&vddnb);
3296 	if (r)
3297 		return r;
3298 
3299 	return sysfs_emit(buf, "%d\n", vddnb);
3300 }
3301 
/* in1_label - constant channel label for the Northbridge voltage sensor. */
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return sysfs_emit(buf, "vddnb\n");
}
3308 
3309 static int amdgpu_hwmon_get_power(struct device *dev,
3310 				  enum amd_pp_sensors sensor)
3311 {
3312 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3313 	unsigned int uw;
3314 	u32 query = 0;
3315 	int r;
3316 
3317 	r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
3318 	if (r)
3319 		return r;
3320 
3321 	/* convert to microwatts */
3322 	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3323 
3324 	return uw;
3325 }
3326 
3327 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3328 					   struct device_attribute *attr,
3329 					   char *buf)
3330 {
3331 	ssize_t val;
3332 
3333 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3334 	if (val < 0)
3335 		return val;
3336 
3337 	return sysfs_emit(buf, "%zd\n", val);
3338 }
3339 
3340 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3341 					     struct device_attribute *attr,
3342 					     char *buf)
3343 {
3344 	ssize_t val;
3345 
3346 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3347 	if (val < 0)
3348 		return val;
3349 
3350 	return sysfs_emit(buf, "%zd\n", val);
3351 }
3352 
3353 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3354 					struct device_attribute *attr,
3355 					char *buf,
3356 					enum pp_power_limit_level pp_limit_level)
3357 {
3358 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3359 	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3360 	uint32_t limit;
3361 	ssize_t size;
3362 	int r;
3363 
3364 	r = amdgpu_pm_get_access_if_active(adev);
3365 	if (r)
3366 		return r;
3367 
3368 	r = amdgpu_dpm_get_power_limit(adev, &limit,
3369 				      pp_limit_level, power_type);
3370 
3371 	if (!r)
3372 		size = sysfs_emit(buf, "%u\n", limit * 1000000);
3373 	else
3374 		size = sysfs_emit(buf, "\n");
3375 
3376 	amdgpu_pm_put_access(adev);
3377 
3378 	return size;
3379 }
3380 
3381 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3382 					 struct device_attribute *attr,
3383 					 char *buf)
3384 {
3385 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3386 }
3387 
3388 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3389 					 struct device_attribute *attr,
3390 					 char *buf)
3391 {
3392 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3393 
3394 }
3395 
3396 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3397 					 struct device_attribute *attr,
3398 					 char *buf)
3399 {
3400 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3401 
3402 }
3403 
3404 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3405 					 struct device_attribute *attr,
3406 					 char *buf)
3407 {
3408 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3409 
3410 }
3411 
3412 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3413 					 struct device_attribute *attr,
3414 					 char *buf)
3415 {
3416 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3417 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3418 
3419 	if (gc_ver == IP_VERSION(10, 3, 1))
3420 		return sysfs_emit(buf, "%s\n",
3421 				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3422 				  "fastPPT" : "slowPPT");
3423 	else
3424 		return sysfs_emit(buf, "%s\n",
3425 				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3426 				  "PPT1" : "PPT");
3427 }
3428 
3429 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3430 		struct device_attribute *attr,
3431 		const char *buf,
3432 		size_t count)
3433 {
3434 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3435 	int limit_type = to_sensor_dev_attr(attr)->index;
3436 	int err;
3437 	u32 value;
3438 
3439 	err = kstrtou32(buf, 10, &value);
3440 	if (err)
3441 		return err;
3442 
3443 	value = value / 1000000; /* convert to Watt */
3444 
3445 	err = amdgpu_pm_get_access(adev);
3446 	if (err < 0)
3447 		return err;
3448 
3449 	err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
3450 
3451 	amdgpu_pm_put_access(adev);
3452 
3453 	if (err)
3454 		return err;
3455 
3456 	return count;
3457 }
3458 
3459 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3460 				      struct device_attribute *attr,
3461 				      char *buf)
3462 {
3463 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3464 	uint32_t sclk;
3465 	int r;
3466 
3467 	/* get the sclk */
3468 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3469 					 (void *)&sclk);
3470 	if (r)
3471 		return r;
3472 
3473 	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3474 }
3475 
/* freq1_label - constant channel label for the gfx/compute clock. */
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "sclk\n");
}
3482 
3483 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3484 				      struct device_attribute *attr,
3485 				      char *buf)
3486 {
3487 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3488 	uint32_t mclk;
3489 	int r;
3490 
3491 	/* get the sclk */
3492 	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3493 					 (void *)&mclk);
3494 	if (r)
3495 		return r;
3496 
3497 	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3498 }
3499 
/* freq2_label - constant channel label for the memory clock. */
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "mclk\n");
}
3506 
3507 /**
3508  * DOC: hwmon
3509  *
3510  * The amdgpu driver exposes the following sensor interfaces:
3511  *
3512  * - GPU temperature (via the on-die sensor)
3513  *
3514  * - GPU voltage
3515  *
3516  * - Northbridge voltage (APUs only)
3517  *
3518  * - GPU power
3519  *
3520  * - GPU fan
3521  *
3522  * - GPU gfx/compute engine clock
3523  *
3524  * - GPU memory clock (dGPU only)
3525  *
3526  * hwmon interfaces for GPU temperature:
3527  *
3528  * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3529  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3530  *
3531  * - temp[1-3]_label: temperature channel label
3532  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3533  *
3534  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3535  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3536  *
3537  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3538  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3539  *
3540  * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
3541  *   - these are supported on SOC15 dGPUs only
3542  *
3543  * hwmon interfaces for GPU voltage:
3544  *
3545  * - in0_input: the voltage on the GPU in millivolts
3546  *
3547  * - in1_input: the voltage on the Northbridge in millivolts
3548  *
3549  * hwmon interfaces for GPU power:
3550  *
3551  * - power1_average: average power used by the SoC in microWatts.  On APUs this includes the CPU.
3552  *
3553  * - power1_input: instantaneous power used by the SoC in microWatts.  On APUs this includes the CPU.
3554  *
3555  * - power1_cap_min: minimum cap supported in microWatts
3556  *
3557  * - power1_cap_max: maximum cap supported in microWatts
3558  *
3559  * - power1_cap: selected power cap in microWatts
3560  *
3561  * hwmon interfaces for GPU fan:
3562  *
3563  * - pwm1: pulse width modulation fan level (0-255)
3564  *
3565  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3566  *
3567  * - pwm1_min: pulse width modulation fan control minimum level (0)
3568  *
3569  * - pwm1_max: pulse width modulation fan control maximum level (255)
3570  *
3571  * - fan1_min: a minimum value Unit: revolution/min (RPM)
3572  *
 * - fan1_max: a maximum value Unit: revolution/min (RPM)
3574  *
3575  * - fan1_input: fan speed in RPM
3576  *
3577  * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
3578  *
 * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
3580  *
3581  * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3582  *       That will get the former one overridden.
3583  *
3584  * hwmon interfaces for GPU clocks:
3585  *
3586  * - freq1_input: the gfx/compute clock in hertz
3587  *
3588  * - freq2_input: the memory clock in hertz
3589  *
3590  * You can use hwmon tools like sensors to view this information on your system.
3591  *
3592  */
3593 
/*
 * hwmon attribute definitions.  The final macro argument is the
 * attribute index, used by the handlers to select a channel
 * (PP_TEMP_* for temperatures, the power-limit type for power2_*).
 */
/* temperatures: temp1 = edge, temp2 = junction/hotspot, temp3 = memory */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
/* fan control: pwm1 in duty-cycle units (0-255), fan1 in RPM */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
/* voltages: in0 = gfx rail, in1 = Northbridge (APU), in2 = board */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
/* power: channel 1 = default PPT, channel 2 = fast PPT (index 1) */
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
/* clocks: freq1 = gfx/compute clock, freq2 = memory clock */
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3640 
/*
 * All hwmon attributes potentially exposed by the driver.  Which of
 * them are actually visible on a given ASIC is decided at registration
 * time by hwmon_attributes_visible().
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_input.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
	&sensor_dev_attr_power1_label.dev_attr.attr,
	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
	&sensor_dev_attr_power2_cap.dev_attr.attr,
	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
	&sensor_dev_attr_power2_label.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
3690 
/*
 * hwmon_attributes_visible - decide per-attribute visibility and mode.
 *
 * Called by sysfs for each entry in hwmon_attributes[].  Returns 0 to
 * hide the attribute entirely, or a (possibly reduced) mode otherwise.
 * The rules encode which sensors/controls exist on each ASIC family,
 * whether DPM is active, and whether the fan interfaces have backing
 * implementations for this chip.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	uint32_t tmp;	/* scratch for capability-probe sensor reads */

	/* under pp one vf mode manage of hwmon attributes is not supported */
	if (amdgpu_sriov_is_pp_one_vf(adev))
		effective_mode &= ~S_IWUSR;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip crit temp on APU */
	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
	     amdgpu_is_multi_aid(adev)) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	      ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
	      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
	if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
		if (adev->family == AMDGPU_FAMILY_SI ||
		    ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
		     (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
		    (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
			return 0;
	}

	/* a VF may set the power limit if the host grants the capability */
	if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
	    amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
		effective_mode |= S_IWUSR;

	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

	/* not all products support both average and instantaneous */
	if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
	    amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
					 (void *)&tmp) == -EOPNOTSUPP)
		return 0;
	if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
	    amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
					 (void *)&tmp) == -EOPNOTSUPP)
		return 0;

	/* hide max/min values if we can't both query and manage the fan */
	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	      (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	      (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	      (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
	     (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
		return 0;

	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
	     amdgpu_is_multi_aid(adev)) &&
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs other than gc 9,4,3 have vddnb */
	if ((!(adev->flags & AMD_IS_APU) ||
	     amdgpu_is_multi_aid(adev)) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* only few boards support vddboard */
	if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
	     amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
					  (void *)&tmp) == -EOPNOTSUPP)
		return 0;

	/* no mclk on APUs other than gc 9,4,3 */
	if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	/* hotspot/mem channels need SOC15 dGPU or GC 9.4.3/9.4.4 */
	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
		return 0;

	/* hotspot temperature for gc 9,4,3*/
	if (amdgpu_is_multi_aid(adev)) {
		if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
			return 0;

		if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
			return attr->mode;
	}

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
		return 0;

	/* only a few GPUs have fast PPT limit and power labels */
	if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
	     (amdgpu_dpm_get_power_limit(adev, &tmp,
					 PP_PWR_LIMIT_MAX,
					 PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
		return 0;

	return effective_mode;
}
3882 
/* hwmon attribute group; entries are filtered per-ASIC at registration
 * time by hwmon_attributes_visible().
 */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};
3887 
/* NULL-terminated group list passed to hwmon_device_register_with_groups() */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
3892 
3893 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3894 				       enum pp_clock_type od_type,
3895 				       char *buf)
3896 {
3897 	int size = 0;
3898 	int ret;
3899 
3900 	ret = amdgpu_pm_get_access_if_active(adev);
3901 	if (ret)
3902 		return ret;
3903 
3904 	ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size);
3905 	if (ret)
3906 		return ret;
3907 	if (size == 0)
3908 		size = sysfs_emit(buf, "\n");
3909 
3910 	amdgpu_pm_put_access(adev);
3911 
3912 	return size;
3913 }
3914 
3915 static int parse_input_od_command_lines(const char *buf,
3916 					size_t count,
3917 					u32 *type,
3918 					long *params,
3919 					uint32_t *num_of_params)
3920 {
3921 	const char delimiter[3] = {' ', '\n', '\0'};
3922 	uint32_t parameter_size = 0;
3923 	char buf_cpy[128] = {0};
3924 	char *tmp_str, *sub_str;
3925 	int ret;
3926 
3927 	if (count > sizeof(buf_cpy) - 1)
3928 		return -EINVAL;
3929 
3930 	memcpy(buf_cpy, buf, count);
3931 	tmp_str = buf_cpy;
3932 
3933 	/* skip heading spaces */
3934 	while (isspace(*tmp_str))
3935 		tmp_str++;
3936 
3937 	switch (*tmp_str) {
3938 	case 'c':
3939 		*type = PP_OD_COMMIT_DPM_TABLE;
3940 		return 0;
3941 	case 'r':
3942 		params[parameter_size] = *type;
3943 		*num_of_params = 1;
3944 		*type = PP_OD_RESTORE_DEFAULT_TABLE;
3945 		return 0;
3946 	default:
3947 		break;
3948 	}
3949 
3950 	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3951 		if (strlen(sub_str) == 0)
3952 			continue;
3953 
3954 		ret = kstrtol(sub_str, 0, &params[parameter_size]);
3955 		if (ret)
3956 			return -EINVAL;
3957 		parameter_size++;
3958 
3959 		if (!tmp_str)
3960 			break;
3961 
3962 		while (isspace(*tmp_str))
3963 			tmp_str++;
3964 	}
3965 
3966 	*num_of_params = parameter_size;
3967 
3968 	return 0;
3969 }
3970 
3971 static int
3972 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3973 				     enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3974 				     const char *in_buf,
3975 				     size_t count)
3976 {
3977 	uint32_t parameter_size = 0;
3978 	long parameter[64];
3979 	int ret;
3980 
3981 	ret = parse_input_od_command_lines(in_buf,
3982 					   count,
3983 					   &cmd_type,
3984 					   parameter,
3985 					   &parameter_size);
3986 	if (ret)
3987 		return ret;
3988 
3989 	ret = amdgpu_pm_get_access(adev);
3990 	if (ret < 0)
3991 		return ret;
3992 
3993 	ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3994 					    cmd_type,
3995 					    parameter,
3996 					    parameter_size);
3997 	if (ret)
3998 		goto err_out;
3999 
4000 	if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
4001 		ret = amdgpu_dpm_dispatch_task(adev,
4002 					       AMD_PP_TASK_READJUST_POWER_STATE,
4003 					       NULL);
4004 		if (ret)
4005 			goto err_out;
4006 	}
4007 
4008 	amdgpu_pm_put_access(adev);
4009 
4010 	return count;
4011 
4012 err_out:
4013 	amdgpu_pm_put_access(adev);
4014 
4015 	return ret;
4016 }
4017 
4018 /**
4019  * DOC: fan_curve
4020  *
4021  * The amdgpu driver provides a sysfs API for checking and adjusting the fan
4022  * control curve line.
4023  *
 * Reading back the file shows you the current settings (temperature in Celsius
 * degree and fan speed in pwm) applied to every anchor point of the curve line
 * and their permitted ranges if changeable.
4027  *
4028  * Writing a desired string(with the format like "anchor_point_index temperature
4029  * fan_speed_in_pwm") to the file, change the settings for the specific anchor
4030  * point accordingly.
4031  *
4032  * When you have finished the editing, write "c" (commit) to the file to commit
4033  * your changes.
4034  *
4035  * If you want to reset to the default value, write "r" (reset) to the file to
4036  * reset them
4037  *
4038  * There are two fan control modes supported: auto and manual. With auto mode,
4039  * PMFW handles the fan speed control(how fan speed reacts to ASIC temperature).
4040  * While with manual mode, users can set their own fan curve line as what
4041  * described here. Normally the ASIC is booted up with auto mode. Any
4042  * settings via this interface will switch the fan control to manual mode
4043  * implicitly.
4044  */
4045 static ssize_t fan_curve_show(struct kobject *kobj,
4046 			      struct kobj_attribute *attr,
4047 			      char *buf)
4048 {
4049 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4050 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4051 
4052 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
4053 }
4054 
4055 static ssize_t fan_curve_store(struct kobject *kobj,
4056 			       struct kobj_attribute *attr,
4057 			       const char *buf,
4058 			       size_t count)
4059 {
4060 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4061 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4062 
4063 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4064 							     PP_OD_EDIT_FAN_CURVE,
4065 							     buf,
4066 							     count);
4067 }
4068 
4069 static umode_t fan_curve_visible(struct amdgpu_device *adev)
4070 {
4071 	umode_t umode = 0000;
4072 
4073 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
4074 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4075 
4076 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
4077 		umode |= S_IWUSR;
4078 
4079 	return umode;
4080 }
4081 
4082 /**
4083  * DOC: acoustic_limit_rpm_threshold
4084  *
4085  * The amdgpu driver provides a sysfs API for checking and adjusting the
4086  * acoustic limit in RPM for fan control.
4087  *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4090  *
4091  * Writing an integer to the file, change the setting accordingly.
4092  *
4093  * When you have finished the editing, write "c" (commit) to the file to commit
4094  * your changes.
4095  *
4096  * If you want to reset to the default value, write "r" (reset) to the file to
4097  * reset them
4098  *
4099  * This setting works under auto fan control mode only. It adjusts the PMFW's
4100  * behavior about the maximum speed in RPM the fan can spin. Setting via this
4101  * interface will switch the fan control to auto mode implicitly.
4102  */
4103 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
4104 					     struct kobj_attribute *attr,
4105 					     char *buf)
4106 {
4107 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4108 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4109 
4110 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
4111 }
4112 
4113 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
4114 					      struct kobj_attribute *attr,
4115 					      const char *buf,
4116 					      size_t count)
4117 {
4118 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4119 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4120 
4121 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4122 							     PP_OD_EDIT_ACOUSTIC_LIMIT,
4123 							     buf,
4124 							     count);
4125 }
4126 
4127 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
4128 {
4129 	umode_t umode = 0000;
4130 
4131 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
4132 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4133 
4134 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
4135 		umode |= S_IWUSR;
4136 
4137 	return umode;
4138 }
4139 
4140 /**
4141  * DOC: acoustic_target_rpm_threshold
4142  *
4143  * The amdgpu driver provides a sysfs API for checking and adjusting the
4144  * acoustic target in RPM for fan control.
4145  *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4148  *
4149  * Writing an integer to the file, change the setting accordingly.
4150  *
4151  * When you have finished the editing, write "c" (commit) to the file to commit
4152  * your changes.
4153  *
4154  * If you want to reset to the default value, write "r" (reset) to the file to
4155  * reset them
4156  *
4157  * This setting works under auto fan control mode only. It can co-exist with
4158  * other settings which can work also under auto mode. It adjusts the PMFW's
4159  * behavior about the maximum speed in RPM the fan can spin when ASIC
4160  * temperature is not greater than target temperature. Setting via this
4161  * interface will switch the fan control to auto mode implicitly.
4162  */
4163 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
4164 					      struct kobj_attribute *attr,
4165 					      char *buf)
4166 {
4167 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4168 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4169 
4170 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
4171 }
4172 
4173 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
4174 					       struct kobj_attribute *attr,
4175 					       const char *buf,
4176 					       size_t count)
4177 {
4178 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4179 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4180 
4181 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4182 							     PP_OD_EDIT_ACOUSTIC_TARGET,
4183 							     buf,
4184 							     count);
4185 }
4186 
4187 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
4188 {
4189 	umode_t umode = 0000;
4190 
4191 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
4192 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4193 
4194 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
4195 		umode |= S_IWUSR;
4196 
4197 	return umode;
4198 }
4199 
4200 /**
4201  * DOC: fan_target_temperature
4202  *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * target temperature in Celsius degree for fan control.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4208  *
4209  * Writing an integer to the file, change the setting accordingly.
4210  *
4211  * When you have finished the editing, write "c" (commit) to the file to commit
4212  * your changes.
4213  *
4214  * If you want to reset to the default value, write "r" (reset) to the file to
4215  * reset them
4216  *
4217  * This setting works under auto fan control mode only. It can co-exist with
 * other settings which can work also under auto mode. Pairing with the
4219  * acoustic_target_rpm_threshold setting, they define the maximum speed in
4220  * RPM the fan can spin when ASIC temperature is not greater than target
4221  * temperature. Setting via this interface will switch the fan control to
4222  * auto mode implicitly.
4223  */
4224 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4225 					   struct kobj_attribute *attr,
4226 					   char *buf)
4227 {
4228 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4229 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4230 
4231 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4232 }
4233 
4234 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4235 					    struct kobj_attribute *attr,
4236 					    const char *buf,
4237 					    size_t count)
4238 {
4239 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4240 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4241 
4242 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4243 							     PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4244 							     buf,
4245 							     count);
4246 }
4247 
4248 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4249 {
4250 	umode_t umode = 0000;
4251 
4252 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4253 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4254 
4255 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4256 		umode |= S_IWUSR;
4257 
4258 	return umode;
4259 }
4260 
4261 /**
4262  * DOC: fan_minimum_pwm
4263  *
4264  * The amdgpu driver provides a sysfs API for checking and adjusting the
4265  * minimum fan speed in PWM.
4266  *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4269  *
4270  * Writing an integer to the file, change the setting accordingly.
4271  *
4272  * When you have finished the editing, write "c" (commit) to the file to commit
4273  * your changes.
4274  *
4275  * If you want to reset to the default value, write "r" (reset) to the file to
4276  * reset them
4277  *
4278  * This setting works under auto fan control mode only. It can co-exist with
4279  * other settings which can work also under auto mode. It adjusts the PMFW's
4280  * behavior about the minimum fan speed in PWM the fan should spin. Setting
4281  * via this interface will switch the fan control to auto mode implicitly.
4282  */
4283 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4284 				    struct kobj_attribute *attr,
4285 				    char *buf)
4286 {
4287 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4288 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4289 
4290 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4291 }
4292 
4293 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4294 				     struct kobj_attribute *attr,
4295 				     const char *buf,
4296 				     size_t count)
4297 {
4298 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4299 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4300 
4301 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4302 							     PP_OD_EDIT_FAN_MINIMUM_PWM,
4303 							     buf,
4304 							     count);
4305 }
4306 
4307 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4308 {
4309 	umode_t umode = 0000;
4310 
4311 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4312 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4313 
4314 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4315 		umode |= S_IWUSR;
4316 
4317 	return umode;
4318 }
4319 
4320 /**
4321  * DOC: fan_zero_rpm_enable
4322  *
4323  * The amdgpu driver provides a sysfs API for checking and adjusting the
4324  * zero RPM feature.
4325  *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4328  *
4329  * Writing an integer to the file, change the setting accordingly.
4330  *
4331  * When you have finished the editing, write "c" (commit) to the file to commit
4332  * your changes.
4333  *
4334  * If you want to reset to the default value, write "r" (reset) to the file to
4335  * reset them.
4336  */
4337 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4338 					   struct kobj_attribute *attr,
4339 					   char *buf)
4340 {
4341 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4342 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4343 
4344 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4345 }
4346 
4347 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4348 					    struct kobj_attribute *attr,
4349 					    const char *buf,
4350 					    size_t count)
4351 {
4352 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4353 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4354 
4355 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4356 							     PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4357 							     buf,
4358 							     count);
4359 }
4360 
4361 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4362 {
4363 	umode_t umode = 0000;
4364 
4365 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4366 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4367 
4368 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4369 		umode |= S_IWUSR;
4370 
4371 	return umode;
4372 }
4373 
4374 /**
4375  * DOC: fan_zero_rpm_stop_temperature
4376  *
4377  * The amdgpu driver provides a sysfs API for checking and adjusting the
4378  * zero RPM stop temperature feature.
4379  *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
4382  *
4383  * Writing an integer to the file, change the setting accordingly.
4384  *
4385  * When you have finished the editing, write "c" (commit) to the file to commit
4386  * your changes.
4387  *
4388  * If you want to reset to the default value, write "r" (reset) to the file to
4389  * reset them.
4390  *
4391  * This setting works only if the Zero RPM setting is enabled. It adjusts the
4392  * temperature below which the fan can stop.
4393  */
4394 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4395 					   struct kobj_attribute *attr,
4396 					   char *buf)
4397 {
4398 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4399 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4400 
4401 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4402 }
4403 
4404 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4405 					    struct kobj_attribute *attr,
4406 					    const char *buf,
4407 					    size_t count)
4408 {
4409 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4410 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4411 
4412 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4413 							     PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4414 							     buf,
4415 							     count);
4416 }
4417 
4418 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4419 {
4420 	umode_t umode = 0000;
4421 
4422 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4423 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4424 
4425 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4426 		umode |= S_IWUSR;
4427 
4428 	return umode;
4429 }
4430 
/*
 * Static description of the overdrive (OD) sysfs tree: a single "fan_ctrl"
 * container holding the fan control tunables. Each entry pairs show/store
 * handlers with an is_visible callback that decides, per ASIC, whether the
 * file is exposed and with what permissions. amdgpu_od_set_init() walks
 * this table to build the actual sysfs hierarchy under gpu_od/.
 */
static struct od_feature_set amdgpu_od_set = {
	.containers = {
		[0] = {
			.name = "fan_ctrl",
			.sub_feature = {
				[0] = {
					.name = "fan_curve",
					.ops = {
						.is_visible = fan_curve_visible,
						.show = fan_curve_show,
						.store = fan_curve_store,
					},
				},
				[1] = {
					.name = "acoustic_limit_rpm_threshold",
					.ops = {
						.is_visible = acoustic_limit_threshold_visible,
						.show = acoustic_limit_threshold_show,
						.store = acoustic_limit_threshold_store,
					},
				},
				[2] = {
					.name = "acoustic_target_rpm_threshold",
					.ops = {
						.is_visible = acoustic_target_threshold_visible,
						.show = acoustic_target_threshold_show,
						.store = acoustic_target_threshold_store,
					},
				},
				[3] = {
					.name = "fan_target_temperature",
					.ops = {
						.is_visible = fan_target_temperature_visible,
						.show = fan_target_temperature_show,
						.store = fan_target_temperature_store,
					},
				},
				[4] = {
					.name = "fan_minimum_pwm",
					.ops = {
						.is_visible = fan_minimum_pwm_visible,
						.show = fan_minimum_pwm_show,
						.store = fan_minimum_pwm_store,
					},
				},
				[5] = {
					.name = "fan_zero_rpm_enable",
					.ops = {
						.is_visible = fan_zero_rpm_enable_visible,
						.show = fan_zero_rpm_enable_show,
						.store = fan_zero_rpm_enable_store,
					},
				},
				[6] = {
					.name = "fan_zero_rpm_stop_temperature",
					.ops = {
						.is_visible = fan_zero_rpm_stop_temp_visible,
						.show = fan_zero_rpm_stop_temp_show,
						.store = fan_zero_rpm_stop_temp_store,
					},
				},
			},
		},
	},
};
4496 
4497 static void od_kobj_release(struct kobject *kobj)
4498 {
4499 	struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4500 
4501 	kfree(od_kobj);
4502 }
4503 
/* kobject type used for OD directories; frees the od_kobj on last put */
static const struct kobj_type od_ktype = {
	.release	= od_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};
4508 
/*
 * amdgpu_od_set_fini - tear down the gpu_od sysfs hierarchy
 * @adev: the amdgpu device
 *
 * Walks adev->pm.od_kobj_list, removing every attribute file from its
 * container before dropping the container's kobject reference. Safe to
 * call on a partially built tree (used as the error-unwind path of
 * amdgpu_od_set_init()) and on an empty list.
 */
static void amdgpu_od_set_fini(struct amdgpu_device *adev)
{
	struct od_kobj *container, *container_next;
	struct od_attribute *attribute, *attribute_next;

	if (list_empty(&adev->pm.od_kobj_list))
		return;

	list_for_each_entry_safe(container, container_next,
				 &adev->pm.od_kobj_list, entry) {
		list_del(&container->entry);

		/* remove and free each attribute file before the directory */
		list_for_each_entry_safe(attribute, attribute_next,
					 &container->attribute, entry) {
			list_del(&attribute->entry);
			sysfs_remove_file(&container->kobj,
					  &attribute->attribute.attr);
			kfree(attribute);
		}

		/* final put triggers od_kobj_release(), freeing the container */
		kobject_put(&container->kobj);
	}
}
4532 
4533 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4534 					   struct od_feature_ops *feature_ops)
4535 {
4536 	umode_t mode;
4537 
4538 	if (!feature_ops->is_visible)
4539 		return false;
4540 
4541 	/*
4542 	 * If the feature has no user read and write mode set,
4543 	 * we can assume the feature is actually not supported.(?)
4544 	 * And the revelant sysfs interface should not be exposed.
4545 	 */
4546 	mode = feature_ops->is_visible(adev);
4547 	if (mode & (S_IRUSR | S_IWUSR))
4548 		return true;
4549 
4550 	return false;
4551 }
4552 
4553 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4554 					struct od_feature_container *container)
4555 {
4556 	int i;
4557 
4558 	/*
4559 	 * If there is no valid entry within the container, the container
4560 	 * is recognized as a self contained container. And the valid entry
4561 	 * here means it has a valid naming and it is visible/supported by
4562 	 * the ASIC.
4563 	 */
4564 	for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4565 		if (container->sub_feature[i].name &&
4566 		    amdgpu_is_od_feature_supported(adev,
4567 			&container->sub_feature[i].ops))
4568 			return false;
4569 	}
4570 
4571 	return true;
4572 }
4573 
/*
 * amdgpu_od_set_init - build the gpu_od sysfs hierarchy from amdgpu_od_set
 * @adev: the amdgpu device
 *
 * Creates the top-level `gpu_od` directory under the device, then for each
 * named container either a plain file (self contained container) or a sub
 * directory holding one file per supported sub-feature. All created kobjects
 * are linked into adev->pm.od_kobj_list so amdgpu_od_set_fini() can unwind
 * them, both on error here and at driver teardown.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * created so far is torn down again.
 */
static int amdgpu_od_set_init(struct amdgpu_device *adev)
{
	struct od_kobj *top_set, *sub_set;
	struct od_attribute *attribute;
	struct od_feature_container *container;
	struct od_feature_item *feature;
	int i, j;
	int ret;

	/* Setup the top `gpu_od` directory which holds all other OD interfaces */
	top_set = kzalloc_obj(*top_set);
	if (!top_set)
		return -ENOMEM;
	/* link before init so err_out can always unwind via the list */
	list_add(&top_set->entry, &adev->pm.od_kobj_list);

	ret = kobject_init_and_add(&top_set->kobj,
				   &od_ktype,
				   &adev->dev->kobj,
				   "%s",
				   "gpu_od");
	if (ret)
		goto err_out;
	INIT_LIST_HEAD(&top_set->attribute);
	top_set->priv = adev;

	for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
		container = &amdgpu_od_set.containers[i];

		if (!container->name)
			continue;

		/*
		 * If there is valid entries within the container, the container
		 * will be presented as a sub directory and all its holding entries
		 * will be presented as plain files under it.
		 * While if there is no valid entry within the container, the container
		 * itself will be presented as a plain file under top `gpu_od` directory.
		 */
		if (amdgpu_od_is_self_contained(adev, container)) {
			if (!amdgpu_is_od_feature_supported(adev,
			     &container->ops))
				continue;

			/*
			 * The container is presented as a plain file under top `gpu_od`
			 * directory.
			 */
			attribute = kzalloc_obj(*attribute);
			if (!attribute) {
				ret = -ENOMEM;
				goto err_out;
			}
			list_add(&attribute->entry, &top_set->attribute);

			/* mode comes from the per-ASIC visibility callback */
			attribute->attribute.attr.mode =
					container->ops.is_visible(adev);
			attribute->attribute.attr.name = container->name;
			attribute->attribute.show =
					container->ops.show;
			attribute->attribute.store =
					container->ops.store;
			ret = sysfs_create_file(&top_set->kobj,
						&attribute->attribute.attr);
			if (ret)
				goto err_out;
		} else {
			/* The container is presented as a sub directory. */
			sub_set = kzalloc_obj(*sub_set);
			if (!sub_set) {
				ret = -ENOMEM;
				goto err_out;
			}
			/* link before init so err_out can always unwind via the list */
			list_add(&sub_set->entry, &adev->pm.od_kobj_list);

			ret = kobject_init_and_add(&sub_set->kobj,
						   &od_ktype,
						   &top_set->kobj,
						   "%s",
						   container->name);
			if (ret)
				goto err_out;
			INIT_LIST_HEAD(&sub_set->attribute);
			sub_set->priv = adev;

			for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
				feature = &container->sub_feature[j];
				if (!feature->name)
					continue;

				if (!amdgpu_is_od_feature_supported(adev,
				     &feature->ops))
					continue;

				/*
				 * With the container presented as a sub directory, the entry within
				 * it is presented as a plain file under the sub directory.
				 */
				attribute = kzalloc_obj(*attribute);
				if (!attribute) {
					ret = -ENOMEM;
					goto err_out;
				}
				list_add(&attribute->entry, &sub_set->attribute);

				/* mode comes from the per-ASIC visibility callback */
				attribute->attribute.attr.mode =
						feature->ops.is_visible(adev);
				attribute->attribute.attr.name = feature->name;
				attribute->attribute.show =
						feature->ops.show;
				attribute->attribute.store =
						feature->ops.store;
				ret = sysfs_create_file(&sub_set->kobj,
							&attribute->attribute.attr);
				if (ret)
					goto err_out;
			}
		}
	}

	/*
	 * If gpu_od is the only member in the list, that means gpu_od is an
	 * empty directory, so remove it.
	 */
	if (list_is_singular(&adev->pm.od_kobj_list))
		goto err_out;

	return 0;

err_out:
	amdgpu_od_set_fini(adev);

	return ret;
}
4707 
/*
 * amdgpu_pm_sysfs_init - register all power-management sysfs interfaces
 * @adev: the amdgpu device
 *
 * Registers, in order: the hwmon device (except under SR-IOV multi-VF,
 * where hwmon is not supported), the device attribute groups filtered by
 * the SR-IOV mode mask, the overdrive (gpu_od) tree when supported, the
 * pm_policy group, and the board attribute group with its conditional
 * node/baseboard power files. Idempotent: returns 0 immediately if sysfs
 * was already initialized, or if DPM is disabled.
 *
 * Returns 0 on success or a negative error code; partially created state
 * is unwound via the err_out labels.
 */
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;
	uint32_t mask = 0;
	uint32_t tmp;
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	if (adev->pm.dpm_enabled == 0)
		return 0;

	mode = amdgpu_virt_get_sriov_vf_mode(adev);

	/* under multi-vf mode, the hwmon attributes are all not supported */
	if (mode != SRIOV_VF_MODE_MULTI_VF) {
		adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
									DRIVER_NAME, adev,
									hwmon_groups);
		if (IS_ERR(adev->pm.int_hwmon_dev)) {
			ret = PTR_ERR(adev->pm.int_hwmon_dev);
			dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
			return ret;
		}
	}

	/* select which device attributes are exposed for this SR-IOV mode */
	switch (mode) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		goto err_out0;

	if (amdgpu_dpm_is_overdrive_supported(adev)) {
		ret = amdgpu_od_set_init(adev);
		if (ret)
			goto err_out1;
	} else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
		dev_info(adev->dev, "overdrive feature is not supported\n");
	}

	/* expose pm_policy only when the SMU implements policy queries */
	if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
	    -EOPNOTSUPP) {
		ret = devm_device_add_group(adev->dev,
					    &amdgpu_pm_policy_attr_group);
		if (ret)
			goto err_out1;
	}

	if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
		ret = devm_device_add_group(adev->dev,
					    &amdgpu_board_attr_group);
		if (ret)
			goto err_out1;
		/* node power files only when the node power limit sensor exists */
		if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
						 (void *)&tmp) != -EOPNOTSUPP) {
			sysfs_add_file_to_group(&adev->dev->kobj,
						&dev_attr_cur_node_power_limit.attr,
						amdgpu_board_attr_group.name);
			sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
						amdgpu_board_attr_group.name);
			sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
						amdgpu_board_attr_group.name);
			sysfs_add_file_to_group(&adev->dev->kobj,
						&dev_attr_max_node_power_limit.attr,
						amdgpu_board_attr_group.name);
			sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
						amdgpu_board_attr_group.name);
		}
		/* baseboard power files only when the UBB power limit sensor exists */
		if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
						 (void *)&tmp) != -EOPNOTSUPP) {
			sysfs_add_file_to_group(&adev->dev->kobj,
						&dev_attr_baseboard_power_limit.attr,
						amdgpu_board_attr_group.name);
			sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_baseboard_power.attr,
						amdgpu_board_attr_group.name);
		}
	}

	adev->pm.sysfs_initialized = true;

	return 0;

err_out1:
	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
err_out0:
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	return ret;
}
4816 
/*
 * amdgpu_pm_sysfs_fini - remove all power-management sysfs interfaces
 * @adev: the amdgpu device
 *
 * Reverse of amdgpu_pm_sysfs_init(): tears down the gpu_od tree, the hwmon
 * device (if one was registered) and the device attribute groups. The
 * devm-managed groups are released automatically by the driver core.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	amdgpu_od_set_fini(adev);

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
4826 
4827 /*
4828  * Debugfs info
4829  */
4830 #if defined(CONFIG_DEBUG_FS)
4831 
4832 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4833 					   struct amdgpu_device *adev)
4834 {
4835 	uint16_t *p_val;
4836 	uint32_t size;
4837 	int i;
4838 	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4839 
4840 	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4841 		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
4842 				GFP_KERNEL);
4843 
4844 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4845 					    (void *)p_val, &size)) {
4846 			for (i = 0; i < num_cpu_cores; i++)
4847 				seq_printf(m, "\t%u MHz (CPU%d)\n",
4848 					   *(p_val + i), i);
4849 		}
4850 
4851 		kfree(p_val);
4852 	}
4853 }
4854 
4855 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4856 {
4857 	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4858 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4859 	uint32_t value;
4860 	uint64_t value64 = 0;
4861 	uint32_t query = 0;
4862 	int size;
4863 
4864 	/* GPU Clocks */
4865 	size = sizeof(value);
4866 	seq_printf(m, "GFX Clocks and Power:\n");
4867 
4868 	amdgpu_debugfs_prints_cpu_info(m, adev);
4869 
4870 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4871 		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4872 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4873 		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4874 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4875 		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4876 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4877 		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4878 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4879 		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4880 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4881 		seq_printf(m, "\t%u mV (VDDNB)\n", value);
4882 	size = sizeof(uint32_t);
4883 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4884 		if (adev->flags & AMD_IS_APU)
4885 			seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4886 		else
4887 			seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4888 	}
4889 	size = sizeof(uint32_t);
4890 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4891 		if (adev->flags & AMD_IS_APU)
4892 			seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4893 		else
4894 			seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4895 	}
4896 	size = sizeof(value);
4897 	seq_printf(m, "\n");
4898 
4899 	/* GPU Temp */
4900 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4901 		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4902 
4903 	/* GPU Load */
4904 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4905 		seq_printf(m, "GPU Load: %u %%\n", value);
4906 	/* MEM Load */
4907 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4908 		seq_printf(m, "MEM Load: %u %%\n", value);
4909 	/* VCN Load */
4910 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4911 		seq_printf(m, "VCN Load: %u %%\n", value);
4912 
4913 	seq_printf(m, "\n");
4914 
4915 	/* SMC feature mask */
4916 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4917 		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4918 
4919 	/* ASICs greater than CHIP_VEGA20 supports these sensors */
4920 	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4921 		/* VCN clocks */
4922 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4923 			if (!value) {
4924 				seq_printf(m, "VCN: Powered down\n");
4925 			} else {
4926 				seq_printf(m, "VCN: Powered up\n");
4927 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4928 					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4929 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4930 					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4931 			}
4932 		}
4933 		seq_printf(m, "\n");
4934 	} else {
4935 		/* UVD clocks */
4936 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4937 			if (!value) {
4938 				seq_printf(m, "UVD: Powered down\n");
4939 			} else {
4940 				seq_printf(m, "UVD: Powered up\n");
4941 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4942 					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4943 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4944 					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4945 			}
4946 		}
4947 		seq_printf(m, "\n");
4948 
4949 		/* VCE clocks */
4950 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4951 			if (!value) {
4952 				seq_printf(m, "VCE: Powered down\n");
4953 			} else {
4954 				seq_printf(m, "VCE: Powered up\n");
4955 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4956 					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4957 			}
4958 		}
4959 	}
4960 
4961 	return 0;
4962 }
4963 
/*
 * Human-readable names for the AMD_CG_SUPPORT_* clock-gating feature
 * flags, consumed by amdgpu_parse_cg_state() when dumping the clock
 * gating state through debugfs.  Terminated by a {0, NULL} sentinel.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};
5001 
5002 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
5003 {
5004 	int i;
5005 
5006 	for (i = 0; clocks[i].flag; i++)
5007 		seq_printf(m, "\t%s: %s\n", clocks[i].name,
5008 			   (flags & clocks[i].flag) ? "On" : "Off");
5009 }
5010 
5011 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
5012 {
5013 	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
5014 	u64 flags = 0;
5015 	int r;
5016 
5017 	r = amdgpu_pm_get_access(adev);
5018 	if (r < 0)
5019 		return r;
5020 
5021 	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
5022 		r = amdgpu_debugfs_pm_info_pp(m, adev);
5023 		if (r)
5024 			goto out;
5025 	}
5026 
5027 	amdgpu_device_ip_get_clockgating_state(adev, &flags);
5028 
5029 	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
5030 	amdgpu_parse_cg_state(m, flags);
5031 	seq_printf(m, "\n");
5032 
5033 out:
5034 	amdgpu_pm_put_access(adev);
5035 
5036 	return r;
5037 }
5038 
/* Generates amdgpu_debugfs_pm_info_fops around the _show() callback above. */
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
5040 
5041 /*
5042  * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW
5043  *
5044  * Reads debug memory region allocated to PMFW
5045  */
5046 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
5047 					 size_t size, loff_t *pos)
5048 {
5049 	struct amdgpu_device *adev = file_inode(f)->i_private;
5050 	size_t smu_prv_buf_size;
5051 	void *smu_prv_buf;
5052 	int ret = 0;
5053 
5054 	ret = amdgpu_pm_dev_state_check(adev, true);
5055 	if (ret)
5056 		return ret;
5057 
5058 	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
5059 	if (ret)
5060 		return ret;
5061 
5062 	if (!smu_prv_buf || !smu_prv_buf_size)
5063 		return -EINVAL;
5064 
5065 	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
5066 				       smu_prv_buf_size);
5067 }
5068 
/* file_operations for the read-only "amdgpu_pm_prv_buffer" debugfs node */
static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};
5075 
5076 #endif
5077 
/**
 * amdgpu_debugfs_pm_init - create the power-management debugfs entries
 * @adev: amdgpu device pointer
 *
 * Registers "amdgpu_pm_info" and, when the device reserved a PMFW
 * private buffer, "amdgpu_pm_prv_buffer" under the DRM primary minor's
 * debugfs root, then lets the DPM code add its STB entries.  Does
 * nothing when DPM is disabled or CONFIG_DEBUG_FS is not set.
 */
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	/* 0444: both files are read-only from userspace */
	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
5099