// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>
#include <linux/units.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"

enum xe_hwmon_reg {
	REG_TEMP,
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_VRAM,
	CHANNEL_MAX,
};

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */
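
/*
 * Example (illustrative values only, not read from hardware): the hwmon ABI
 * works in the units above, so a 25 W PL1 limit is reported through
 * power1_max as 25 * SF_POWER = 25000000 uW, and a 30 A critical current is
 * reported through curr1_crit as 30 * SF_CURR = 30000 mA.
 */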

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes */
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};

static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				      int channel)
{
	struct xe_device *xe = hwmon->xe;

	switch (hwmon_reg) {
	case REG_TEMP:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_TEMPERATURE;
			else if (channel == CHANNEL_VRAM)
				return BMG_VRAM_TEMPERATURE;
		} else if (xe->info.platform == XE_DG2) {
			if (channel == CHANNEL_PKG)
				return PCU_CR_PACKAGE_TEMPERATURE;
			else if (channel == CHANNEL_VRAM)
				return BMG_VRAM_TEMPERATURE;
		}
		break;
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_RAPL_LIMIT;
			else
				return BMG_PLATFORM_POWER_LIMIT;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PACKAGE_RAPL_LIMIT;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_RAPL_LIMIT;
		}
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU;
		else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_POWER_SKU;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_PVC)
			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_DG2)
			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
			return GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_ENERGY_STATUS;
			else
				return BMG_PLATFORM_ENERGY_STATUS;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PLATFORM_ENERGY_STATUS;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_ENERGY_STATUS;
		}
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	return XE_REG(0);
}

#define PL1_DISABLE	0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs: allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val, min, max;
	struct xe_device *xe = hwmon->xe;
	struct xe_reg rapl_limit, pkg_power_sku;
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
	pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);

	/*
	 * The validity of REG_PKG_RAPL_LIMIT is already checked in
	 * xe_hwmon_power_is_visible, so it is not checked again here.
	 */
	if (!xe_reg_is_valid(pkg_power_sku)) {
		drm_warn(&xe->drm, "pkg_power_sku invalid\n");
		*value = 0;
		return;
	}

	mutex_lock(&hwmon->hwmon_lock);

	reg_val = xe_mmio_read32(mmio, rapl_limit);
	/* Check if PL1 limit is disabled */
	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
		*value = PL1_DISABLE;
		goto unlock;
	}

	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);

	reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*value = clamp_t(u64, *value, min, max);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}
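
/*
 * Worked example for the PL1 read path above, assuming an illustrative
 * scl_shift_power of 3 (1/8 W units; not guaranteed for any given SKU):
 * a PKG_PWR_LIM_1 field value of 200 decodes to
 * mul_u64_u32_shr(200, SF_POWER, 3) = 200 * 1000000 >> 3 = 25000000 uW (25 W),
 * which is then clamped to the PKG_MIN_PWR/PKG_MAX_PWR range when both are
 * non-zero.
 */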

static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	int ret = 0;
	u64 reg_val;
	struct xe_reg rapl_limit;

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);

	mutex_lock(&hwmon->hwmon_lock);

	/* Disable PL1 limit and verify, as the limit cannot be disabled on all platforms */
	if (value == PL1_DISABLE) {
		reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
		reg_val = xe_mmio_read32(mmio, rapl_limit);
		if (reg_val & PKG_PWR_LIM_1_EN) {
			drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
			ret = -EOPNOTSUPP;
		}
		goto unlock;
	}

	/* Computation in 64 bits to avoid overflow. Round to nearest. */
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
	reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);

unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}
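
/*
 * Worked example for the PL1 write path above, again assuming an illustrative
 * scl_shift_power of 3: writing 25000000 uW to power1_max gives
 * DIV_ROUND_CLOSEST_ULL(25000000 << 3, SF_POWER) = 200 register units, which
 * is programmed into PKG_PWR_LIM_1 together with PKG_PWR_LIM_1_EN.
 */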

static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
	u64 reg_val;

	/*
	 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so the
	 * validity check for this register can be skipped.
	 * See xe_hwmon_power_is_visible.
	 */
	reg_val = xe_mmio_read32(mmio, reg);
	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32 bits wide and is subject to
 * overflow. How long before overflow? For example, with a scaling bit shift of
 * 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and a power draw of
 * 1000 watts, the 32-bit counter will overflow in approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases the overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel));

	if (reg_val >= ei->reg_val_prev)
		ei->accum_energy += reg_val - ei->reg_val_prev;
	else
		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;

	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}
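
/*
 * Illustration of the wraparound handling above (example register values):
 * with reg_val_prev = 0xfffffff0 and a new reg_val = 0x10, the counter has
 * wrapped, so accum_energy grows by UINT_MAX - 0xfffffff0 + 0x10 = 0x1f
 * hardware units. The accumulated value is only converted to microjoules
 * (mul_u64_u32_shr(accum_energy, SF_ENERGY, scl_shift_energy)) on output,
 * which is what pushes the effective overflow horizon out to decades.
 */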

static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);

	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation. As x is
	 * 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75.
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y and then add 2 when
	 * doing the final right shift to account for units.
	 */
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (milliseconds) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}
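
/*
 * Worked example for the tau decode above, with the default scl_shift_time of
 * 0xa (1/1024 s units): x = 1, y = 5 gives tau4 = (4 | 1) << 5 = 160, so
 * out = mul_u64_u32_shr(160, SF_TIME, 10 + 2) = 160000 >> 12 = 39 ms,
 * i.e. tau = 1.25 * 2^5 = 40 hardware time units.
 */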

static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, the existing discrete GPUs do not provide a correct PKG_MAX_WIN value,
	 * therefore a default constant value is used instead. For future discrete GPUs
	 * this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max_win in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show().
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
			  PKG_PWR_LIM_1_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	return count;
}
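
/*
 * Worked example for the store path above, again with scl_shift_time = 0xa:
 * writing 1000 ms gives val = DIV_ROUND_CLOSEST_ULL(1000 << 10, SF_TIME) =
 * 1024 hardware units, so y = ilog2(1024) = 10 and
 * x = (1024 - (1 << 10)) << 2 >> 10 = 0, i.e. tau is programmed as
 * 1.0 * 2^10 units (one second). With PKG_MAX_WIN_DEFAULT = 0x12, max_win
 * works out to 256000 ms, matching the 256 second limit mentioned above.
 */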

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_CARD);

static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_PKG);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	NULL
};

static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;

	xe_pm_runtime_get(hwmon->xe);

	ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Avoid Illegal Subcommand error */
	if (hwmon->xe->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						   POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						    POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      (uval & POWER_SETUP_I1_DATA_MASK));
}

static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}
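
/*
 * Illustration of the I1 scaling used by the two helpers above, assuming a
 * fixed point format where POWER_SETUP_I1_SHIFT selects 1/64 units: an I1
 * data field of 1280 reads back as 1280 * SF_POWER >> 6 = 20000000 uW (20 W)
 * when bit 31 selects watts, or 1280 * SF_CURR >> 6 = 20000 mA (20 A) when it
 * selects amperes.
 */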

static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
	/* HW register value is in units of 2.5 millivolts */
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
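
/*
 * Example for the voltage conversion above: a VOLTAGE_MASK field value of 480
 * corresponds to DIV_ROUND_CLOSEST(480 * 2500, SF_VOLTAGE) = 1200 mV.
 */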

static umode_t
xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_temp_input:
	case hwmon_temp_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_TEMP, channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	switch (attr) {
	case hwmon_temp_input:
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_TEMP, channel));

		/* HW register value is in degrees Celsius, convert to millidegrees. */
		*val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
							channel)) ? 0664 : 0;
	case hwmon_power_rated_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
							channel)) ? 0444 : 0;
	case hwmon_power_crit:
		if (channel == CHANNEL_PKG)
			return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
				!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
		break;
	case hwmon_power_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
		xe_hwmon_power_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	/* The current hwmon sysfs attributes are available only for the package channel */
	if (channel != CHANNEL_PKG)
		return 0;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	case hwmon_curr_label:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_temp:
		if (channel == CHANNEL_PKG)
			*str = "pkg";
		else if (channel == CHANNEL_VRAM)
			*str = "vram";
		return 0;
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	long energy;
	u64 val_sku_unit = 0;
	int channel;
	struct xe_reg pkg_power_sku_unit;

	/*
	 * The contents of register PKG_POWER_SKU_UNIT do not change,
	 * so read it once and store the shift values.
	 */
	pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
	if (xe_reg_is_valid(pkg_power_sku_unit)) {
		val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
	}

	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);
}
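
/*
 * Illustrative PKG_POWER_SKU_UNIT decode (example field values only): with
 * PKG_PWR_UNIT = 3, PKG_ENERGY_UNIT = 14 and PKG_TIME_UNIT = 10, power is
 * scaled in 1/8 W steps, energy in 1/16384 J steps and time in 1/1024 s
 * steps, which is where the shift values used throughout this file come from.
 */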

static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

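/*
 * Sketch of the resulting sysfs layout (which names appear depends on which
 * registers are valid for the platform): registration creates
 * /sys/class/hwmon/hwmon<i>/ with attributes such as power1_max and
 * power1_max_interval for the card channel, power2_*, energy2_input and
 * temp2_input for the package channel, and temp3_input for VRAM, following
 * the channel order in hwmon_info above.
 */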
int xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;
	int ret;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return 0;

	/* hwmon is not available on VFs */
	if (IS_SRIOV_VF(xe))
		return 0;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	mutex_init(&hwmon->hwmon_lock);
	ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
	if (ret)
		return ret;

	/* There's only one instance of hwmon per device */
	hwmon->xe = xe;
	xe->hwmon = hwmon;

	xe_hwmon_get_preregistration_info(hwmon);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								&hwmon_chip_info,
								hwmon_groups);
	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return PTR_ERR(hwmon->hwmon_dev);
	}

	return 0;
}