// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>
#include <linux/ktime.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3
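
/*
 * Hardware frequency ratios are expressed in units of
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER = 50/3 ~= 16.67 MHz;
 * decode_freq() and encode_freq() below convert between those raw ratios
 * and MHz.
 */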

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performing use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and a
 * frequency selection policy that varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C state management is also a GuC PC feature and is enabled in
 * Xe on all platforms.
 */
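
/*
 * Informative sketch of the typical flow built on top of this file (not
 * a literal call sequence; see the actual callers for details):
 *
 *	xe_guc_pc_init(pc);		// allocate the SLPC shared data BO
 *	xe_guc_pc_start(pc);		// reset SLPC, set up freqs and GuC RC
 *	xe_guc_pc_set_max_freq(pc, f);	// serviced via H2G SLPC requests
 *	...
 *	xe_guc_pc_stop(pc);		// e.g. around suspend / GT reset
 *
 * The user-visible frequency knobs backed by these helpers are exposed
 * through the GT frequency sysfs interface (see xe_gt_freq.c).
 */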

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}
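
/*
 * The SLPC shared data lives in a GGTT-mapped buffer object that may be
 * placed in VRAM on discrete platforms, so it is accessed through the
 * iosys_map helpers rather than plain pointer dereferences.
 */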
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
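
/*
 * For example, SLPC_EVENT(SLPC_EVENT_RESET, 2) packs the event id and an
 * argument count of 2 into the second dword of a
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST message; the arguments themselves
 * follow in the remaining dwords (see pc_action_reset() below).
 */

/*
 * Poll the SLPC global state with exponential backoff: start with a 10us
 * sleep and double it on each iteration, clamping the last sleep so the
 * total wait never exceeds timeout_ms.
 */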
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state,
			     int timeout_ms)
{
	int timeout_us = 1000 * timeout_ms;
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}
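
/*
 * Worked example: a raw ratio of 24 decodes to
 * DIV_ROUND_CLOSEST(24 * 50, 3) = 400 MHz, and encode_freq(400) =
 * DIV_ROUND_CLOSEST(400 * 3, 50) = 24 round-trips back to the same ratio.
 */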
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient level.
	 * Our goal is to have the admin's choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
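
/*
 * A short summary of the RPx ladder as used in this file: RP0 is the
 * maximum frequency achievable without overclocking and RPn the minimum,
 * both read from fuses at init. RPe (efficient) and RPa are resolved at
 * runtime by PCODE and re-read on demand below; on PVC the fused RP1/RP0
 * values are used as approximations for RPe/RPa instead.
 */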
static void mtl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP0 as the approximation
	 * for RPa. On other platforms we get the resolved RPa directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as the approximation
	 * for RPe. On other platforms we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270) {
		mtl_update_rpa_value(pc);
		mtl_update_rpe_value(pc);
	} else {
		tgl_update_rpa_value(pc);
		tgl_update_rpe_value(pc);
	}

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is
 * in a Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, the actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}
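
/*
 * Note the distinction: the "actual" frequency above is what the GT is
 * running at right now (CAGF, which reads as 0 in RC6), while the
 * "current" frequency below is the last software request written to
 * RPNSWREQ.
 */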
static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if GT forcewake could not be acquired.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with the current freq request when GuC RC is
	 * enabled. Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = get_cur_freq(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpa_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_min_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: the current GT C state (GT_IDLE_C0, GT_IDLE_C6 or
 * GT_IDLE_UNKNOWN).
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}
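
/*
 * The residency helpers below return raw 32-bit hardware counter values;
 * scaling to time units and wrap accounting are left to the caller (see
 * xe_gt_idle.c).
 */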

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw RC6 residency counter value.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw MC6 residency counter value.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
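
/*
 * While workaround 22019338487 is active, the maximum frequency request
 * is capped to the MERT limit of the affected platform: the media GT cap
 * on LNL or the primary GT cap on BMG.
 */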
static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
 * GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where the min is
	 * listed as RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/*
		 * Get updated min/max and stash them.
		 */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	xe_gt_idle_disable_c6(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}
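
/*
 * Seed PCODE's min frequency table with the fused RPn..RP0 range,
 * expressed in multiples of GT_FREQUENCY_MULTIPLIER (50 MHz).
 */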
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are only really ready for use once the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
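
/*
 * SLPC_PARAM_STRATEGIES selects optional SLPC policies; in this file it
 * is only used from xe_guc_pc_start() to enable the compute-optimized
 * strategy.
 */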
static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	int ret = 0;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_STRATEGIES,
				  val);

	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	ktime_t earlier;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	earlier = ktime_get();
	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS)) {
		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
			   xe_gt_throttle_get_limit_reasons(gt));

		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
			ret = -EIO;
			goto out;
		}

		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
			   ktime_ms_delta(ktime_get(), earlier));
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
	if (ret)
		goto out;

	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 (this function currently cannot fail).
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}

static const char *pc_get_state_string(struct xe_guc_pc *pc)
{
	switch (slpc_shared_data_read(pc, header.global_state)) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

/**
 * xe_guc_pc_print - Print GuC's Power Conservation information for debug
 * @pc: Xe_GuC_PC instance
 * @p: drm_printer
 */
void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
{
	drm_printf(p, "SLPC Shared Data Header:\n");
	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));

	if (pc_action_query_task_state(pc))
		return;

	drm_printf(p, "\nSLPC Tasks Status:\n");
	drm_printf(p, "\tGTPERF enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_GTPERF_TASK_ENABLED));
	drm_printf(p, "\tDCC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_DCC_TASK_ENABLED));
	drm_printf(p, "\tDCC in use: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IN_DCC));
	drm_printf(p, "\tBalancer enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_ENABLED));
	drm_printf(p, "\tIBC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IBC_TASK_ENABLED));
	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ENABLED));
	drm_printf(p, "\tBalancer IA LMT active: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ACTIVE));
}