xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS-initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * RAM), or S4 (suspend to disk). The main functions here, `xe_pm_suspend` and
 * `xe_pm_resume`, are the entry points for suspending to and resuming from
 * these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
 * alive for a quicker, low latency resume, or D3cold, where Vcc power is off for
 * better power savings.
 * Vcc for the PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3Cold. The lowest runtime PM possible from the PCI
 * subsystem is D3hot. Then, if all the paired devices under the same root port
 * are in D3hot, ACPI will assist here and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend. This decision is based on runtime conditions such as VRAM usage,
 * for instance to guarantee a quick and low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * can be put into D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component provides
 * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem calls on the transitions to and from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver uses to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) or GT frequency
 * management (RPS).
 */
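
/*
 * Illustrative only: a minimal sketch of the get/put pattern expected at an
 * outer entry point such as an ioctl handler. The xe_pm_runtime_* calls are
 * the ones provided below; the handler and inner helper names here are
 * hypothetical.
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = xe_foo_do_work(xe, data);
 *
 *		xe_pm_runtime_put(xe);
 *		return ret;
 *	}
 */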

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif
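
/*
 * The two lockdep maps above keep the dependencies lockdep learns for the
 * runtime PM callbacks separate: the d3cold map models a runtime resume that
 * may need to restore VRAM, and therefore take dma_resv locks and allocate
 * memory, while the nod3cold map models a resume that is safe to trigger
 * from reclaim context. See xe_rpm_reclaim_safe() and
 * xe_pm_runtime_lockdep_prime() below.
 */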

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_display;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	xe_i2c_pm_suspend(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_early(xe);
	if (err)
		goto err;

	xe_i2c_pm_resume(xe, xe->d3cold.allowed);

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_late(xe);
	if (err)
		goto err;

	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

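/*
 * Enable runtime PM: use autosuspend with a 1 second delay, mark the device
 * active and allow runtime PM, then drop a usage-count reference so the
 * device can actually runtime suspend once it goes idle. This is undone by
 * xe_pm_runtime_fini() at driver teardown.
 */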
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

static u32 vram_threshold_value(struct xe_device *xe)
{
	/* FIXME: D3Cold temporarily disabled by default on BMG */
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;

	return DEFAULT_VRAM_THRESHOLD;
}

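/*
 * System suspend/hibernate notifier: on the *_PREPARE events a runtime PM
 * reference is taken and user buffer objects are evicted while pinned ones
 * are prepared for suspend; the reference is held until the matching *_POST
 * event (or dropped right away if preparation fails) so the device stays
 * resumed for the whole operation.
 */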
static int xe_pm_notifier_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
	int err = 0;

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		xe_pm_runtime_get(xe);
		err = xe_bo_evict_all_user(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
			xe_pm_runtime_put(xe);
			break;
		}

		err = xe_bo_notifier_prepare_all_pinned(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
			xe_pm_runtime_put(xe);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		xe_bo_notifier_unprepare_all_pinned(xe);
		xe_pm_runtime_put(xe);
		break;
	}

	if (err)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	u32 vram_threshold;
	int err;

	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
	err = register_pm_notifier(&xe->pm_notifier);
	if (err)
		return err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	if (xe->d3cold.capable) {
		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
		if (err)
			goto err_unregister;
	}

	xe_pm_runtime_init(xe);
	return 0;

err_unregister:
	unregister_pm_notifier(&xe->pm_notifier);
	return err;
}

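/*
 * Undo xe_pm_runtime_init(): resume the device while taking back a
 * usage-count reference and forbid further runtime PM, so the device stays
 * awake for the rest of the teardown.
 */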
static void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

/**
 * xe_pm_fini - Finalize PM
 * @xe: xe device instance
 */
void xe_pm_fini(struct xe_device *xe)
{
	if (xe_device_uc_enabled(xe))
		xe_pm_runtime_fini(xe);

	unregister_pm_notifier(&xe->pm_notifier);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can only be used as an unreliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if the PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However,
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_pm_runtime_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 from making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto out;

	/*
	 * Hold the lock over the entire list walk since xe_ttm_bo_destroy() and
	 * xe_bo_move_notify() also check and delete bo entries from the user
	 * fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out_resume;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out_resume;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

	xe_i2c_pm_suspend(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

out_resume:
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_early(xe);
		if (err)
			goto out;
	}

	xe_i2c_pm_resume(xe, xe->d3cold.allowed);

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_late(xe);
		if (err)
			goto out;
	}

	xe_pxp_pm_resume(xe->pxp);

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds).  Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_in_progress();
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places that are surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`; it will warn if
 * that protection is missing. The reference must still be put back
 * afterwards, since this function always bumps the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM if the parent PCIe bridge has no driver bound
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Return:
 * * 0		- success
 * * -EINVAL	- invalid argument
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}

/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}