// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/drm_cache.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

/**
 * DOC: SLPC - Dynamic Frequency management
 *
 * Single Loop Power Control (SLPC) is a GuC algorithm that manages
 * GT frequency based on busyness and how KMD initializes it. SLPC is
 * almost completely in control after initialization, except for a few
 * scenarios mentioned below.
 *
 * KMD uses the concept of waitboost to ramp frequency to RP0 when there
 * are pending submissions for a context. It achieves this by sending GuC a
 * request to update the min frequency to RP0. Waitboost is disabled
 * when the request retires.
 *
 * Another form of frequency control happens through per-context hints.
 * A context can be marked as low latency during creation. That will ensure
 * that SLPC uses an aggressive frequency ramp when that context is active.
 *
 * Power profiles add another level of control to these mechanisms.
 * When the power saving profile is chosen, SLPC will use conservative
 * thresholds to ramp frequency, thus saving power. KMD will disable
 * waitboosts as well, which achieves further power savings. The base
 * profile is the default and ensures balanced performance for any workload.
 *
 * Lastly, users have some level of control through sysfs, where min/max
 * frequency values can be altered and the use of efficient freq
 * can be toggled.
 */

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

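/* Detect and cache whether SLPC is supported and has been selected for use. */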
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, corresponding value will be read
	 * and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

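/*
 * Read the SLPC global state from the GuC shared data page. The range is
 * flushed first so the CPU sees the value GuC last wrote.
 */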
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

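/*
 * Non-blocking SLPC SET_PARAM request. A positive return value from GuC is
 * unexpected here and treated as a protocol error.
 */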
static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_set_param_nb(guc, id, value);
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

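/*
 * Ask GuC to dump its current task state into the shared data page, then
 * flush the range so the CPU reads the freshly written values.
 */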
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
				id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Non-blocking request will avoid stalls */
		ret = slpc_set_param_nb(slpc,
					SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					freq);
		if (ret)
			guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
				   freq, ERR_PTR(ret));
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
	int err;

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		err = slpc_force_min_freq(slpc, slpc->boost_freq);
		if (!err)
			slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

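/*
 * Allocate and map the SLPC shared data object and initialize the software
 * state (softlimits, boost bookkeeping, media ratio mode, power profile).
 */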
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;
	slpc->ignore_eff_freq = false;
	slpc->min_is_rpmax = false;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;
	slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;

	slpc->power_profile = SLPC_POWER_PROFILES_BASE;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

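/*
 * Send the SLPC RESET event and wait for the global state to transition
 * to RUNNING.
 */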
static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			guc_probe_error(guc, "SLPC not enabled! State = %s\n",
					slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

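/*
 * Min/max frequencies in the task state data are stored in hardware units
 * of GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER MHz; decode them back to
 * MHz for the caller.
 */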
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				  data->task_state_data.freq) *
				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				  data->task_state_data.freq) *
				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

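/*
 * Reinitialize the shared data page and stage the override parameters that
 * GuC will read and apply on the next SLPC reset.
 */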
static void slpc_shared_data_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;

	memset(data, 0, sizeof(struct slpc_shared_data));
	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	/*
	 * Don't allow balancer related algorithms on platforms before
	 * Xe_LPG, where GuC started to restrict it to TDP limited scenarios.
	 */
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)) {
		slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
				      SLPC_PARAM_TASK_DISABLE_BALANCER);

		slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
				      SLPC_PARAM_TASK_DISABLE_DCC);
	}
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

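/*
 * Toggle whether SLPC ignores the efficient frequency. When it is ignored,
 * the min frequency is also dropped back to RPn.
 */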
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			     val);
	if (ret) {
		guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
				val, ERR_PTR(ret));
	} else {
		slpc->ignore_eff_freq = val;

		/* Set min to RPn when we disable efficient freq */
		if (val)
			ret = slpc_set_param(slpc,
					     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					     slpc->min_freq);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);
	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
			     val);

	if (!ret)
		slpc->min_freq_softlimit = val;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);

	/* Return standardized err code for sysfs calls */
	if (ret)
		ret = -EIO;

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_STRATEGIES,
				     val);

	return ret;
}

int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	if (!HAS_MEDIA_RATIO_MODE(i915))
		return -ENODEV;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_MEDIA_FF_RATIO_MODE,
				     val);
	return ret;
}

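/*
 * Select the SLPC power profile (base or power saving) and cache the value
 * so it can be reapplied on the next SLPC enable.
 */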
int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	if (val > SLPC_POWER_PROFILES_POWER_SAVING)
		return -EINVAL;

	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_POWER_PROFILE,
			     val);
	if (ret)
		guc_err(slpc_to_guc(slpc),
			"Failed to set power profile to %d: %pe\n",
			val, ERR_PTR(ret));
	else
		slpc->power_profile = val;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is setup by RPS code
	 * when host based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit) {
		slpc->max_freq_softlimit = slpc->rp0_freq;
		slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
	} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);
	}

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit) {
		/* Min softlimit is initialized to RPn */
		slpc->min_freq_softlimit = slpc->min_freq;
		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
	} else {
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);
	}

	return 0;
}

static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
	int slpc_min_freq;
	int ret;

	ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
	if (ret) {
		guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
		return false;
	}

	if (slpc_min_freq == SLPC_MAX_FREQ_MHZ)
		return true;
	else
		return false;
}

static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
{
	/*
	 * For server parts, SLPC min will be at RPMax.
	 * Use min softlimit to clamp it to RP0 instead.
	 */
	if (!slpc->min_freq_softlimit &&
	    is_slpc_min_freq_rpmax(slpc)) {
		slpc->min_is_rpmax = true;
		slpc->min_freq_softlimit = slpc->rp0_freq;
		(slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
	}
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

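/*
 * Cache RP0/RP1/RPn from the RPS frequency caps; the boost frequency
 * defaults to RP0 if it has not been set yet.
 */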
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/*
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));

	slpc_get_rp_values(slpc);

	/* Handle the case where min=max=RPmax */
	update_server_min_softlimit(slpc);

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
		return ret;
	}

	/* Set cached value of ignore efficient freq */
	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
		return ret;
	}

	/* Set cached media freq ratio mode */
	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);

	/* Enable SLPC Optimized Strategy for compute */
	intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

	/* Set cached value of power_profile */
	ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
		return ret;
	}

	return 0;
}

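/*
 * Update the waitboost frequency. If boosts are currently outstanding, the
 * new value is applied as the GuC min frequency right away.
 */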
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire,
	 * so we don't need to fail that if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

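/*
 * Dump SLPC state into a drm_printer after refreshing the task state data
 * from GuC.
 */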
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tDCC enabled: %s\n",
				   str_yes_no(slpc_tasks->status &
					      SLPC_DCC_TASK_ENABLED));
			drm_printf(p, "\tDCC in: %s\n",
				   str_yes_no(slpc_tasks->status & SLPC_IN_DCC));
			drm_printf(p, "\tBalancer enabled: %s\n",
				   str_yes_no(slpc_tasks->status &
					      SLPC_BALANCER_ENABLED));
			drm_printf(p, "\tIBC enabled: %s\n",
				   str_yes_no(slpc_tasks->status &
					      SLPC_IBC_TASK_ENABLED));
			drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
				   str_yes_no(slpc_tasks->status &
					      SLPC_BALANCER_IA_LMT_ENABLED));
			drm_printf(p, "\tBalancer IA LMT active: %s\n",
				   str_yes_no(slpc_tasks->status &
					      SLPC_BALANCER_IA_LMT_ACTIVE));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
			drm_printf(p, "\tBoosts outstanding: %u\n",
				   atomic_read(&slpc->num_waiters));
		}
	}

	return ret;
}

void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}