/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */
28
29 #include <linux/pm_runtime.h>
30
31 #include <drm/drm_print.h>
32 #include <drm/intel/display_parent_interface.h>
33
34 #include "i915_drv.h"
35 #include "i915_trace.h"
36
/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
53
/* Map the embedded runtime_pm member back to its owning i915 device. */
static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
	return container_of(rpm, struct drm_i915_private, runtime_pm);
}
58
59 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
60
/*
 * Lazily set up the wakeref tracking directory; rpm->debug.class serves as
 * the "already initialized" marker, so repeated calls are harmless.
 */
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	if (!rpm->debug.class)
		ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
				     "intel_runtime_pm");
}
67
/*
 * Allocate a tracker entry for a newly acquired wakeref. Falls back to the
 * default cookie when runtime pm is unavailable or tracking is disabled.
 */
static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	if (!rpm->available || rpm->no_wakeref_tracking)
		return INTEL_WAKEREF_DEF;

	return intel_ref_tracker_alloc(&rpm->debug);
}
76
/* Release the tracker entry handed out by track_intel_runtime_pm_wakeref(). */
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wakeref)
{
	if (!rpm->available || rpm->no_wakeref_tracking)
		return;

	intel_ref_tracker_free(&rpm->debug, wakeref);
}
85
/* Tear down the tracking directory; used on final driver release. */
static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	ref_tracker_dir_exit(&rpm->debug);
}
90
/*
 * Drop one wakeref count. atomic_dec_and_lock_irqsave() returns true, with
 * rpm->debug.lock held, only when the count reaches zero; in that case any
 * references still registered with the tracker are printed (an imbalance
 * between acquire and release would show up here).
 */
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);
}
104
/* Dump the currently tracked wakerefs through @p. */
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	intel_ref_tracker_show(&rpm->debug, p);
}
110
111 #else
112
/* Stub: wakeref tracking compiled out. */
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
116
/* Stub: always hand out the default cookie when tracking is compiled out. */
static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return INTEL_WAKEREF_DEF;
}
122
/* Stub: nothing to untrack when tracking is compiled out. */
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wakeref)
{
}
127
/* Without tracking, releasing a wakeref is a plain atomic decrement. */
static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}
133
/* Stub: no tracking state to tear down. */
static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}
138
139 #endif
140
/*
 * Account a new reference in rpm->wakeref_count. The counter packs two
 * values (see intel_rpm_raw_wakeref_count()/intel_rpm_wakelock_count()):
 * raw wakerefs in the low bits and wakelocks above INTEL_RPM_WAKELOCK_BIAS,
 * so a wakelock acquisition bumps both fields at once.
 */
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}
152
/*
 * Undo intel_runtime_pm_acquire(): drop the wakelock bias (if this was a
 * wakelock reference), then drop the raw wakeref count, with the debug
 * build checking the tracker once the count reaches zero.
 */
static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}
165
/*
 * Common implementation for the reference getters: resume the device
 * synchronously, account the reference and hand back a tracking cookie.
 */
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	/* pm_runtime_get_sync() keeps the usage count even on failure. */
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}
180
drm_to_rpm(const struct drm_device * drm)181 static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm)
182 {
183 struct drm_i915_private *i915 = to_i915(drm);
184
185 return &i915->runtime_pm;
186 }
187
/* Display parent interface hook: grab a runtime pm wakelock reference. */
static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	return intel_runtime_pm_get(rpm);
}
192
/* Display parent interface hook: grab a raw (non-wakelock) reference. */
static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	return intel_runtime_pm_get_raw(rpm);
}
197
/* Display parent interface hook: grab a reference only if already in use. */
static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	return intel_runtime_pm_get_if_in_use(rpm);
}
202
/* Display parent interface hook: grab a reference without resuming. */
static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	return intel_runtime_pm_get_noresume(rpm);
}
207
/* Display parent interface hook: release a wakelock reference. */
static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	intel_runtime_pm_put(rpm, wakeref);
}
212
/* Display parent interface hook: release a raw reference. */
static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	intel_runtime_pm_put_raw(rpm, wakeref);
}
217
/* Display parent interface hook: release a reference without a cookie. */
static void i915_display_rpm_put_unchecked(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	intel_runtime_pm_put_unchecked(rpm);
}
222
i915_display_rpm_suspended(const struct drm_device * drm)223 static bool i915_display_rpm_suspended(const struct drm_device *drm)
224 {
225 return intel_runtime_pm_suspended(drm_to_rpm(drm));
226 }
227
/* Display parent interface hook: assert that a wakelock ref is held. */
static void i915_display_rpm_assert_held(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	assert_rpm_wakelock_held(rpm);
}
232
/* Display parent interface hook: temporarily disable wakeref asserts. */
static void i915_display_rpm_assert_block(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	disable_rpm_wakeref_asserts(rpm);
}
237
/* Display parent interface hook: re-enable wakeref asserts. */
static void i915_display_rpm_assert_unblock(const struct drm_device *drm)
{
	struct intel_runtime_pm *rpm = drm_to_rpm(drm);

	enable_rpm_wakeref_asserts(rpm);
}
242
243 const struct intel_display_rpm_interface i915_display_rpm_interface = {
244 .get = i915_display_rpm_get,
245 .get_raw = i915_display_rpm_get_raw,
246 .get_if_in_use = i915_display_rpm_get_if_in_use,
247 .get_noresume = i915_display_rpm_get_noresume,
248 .put = i915_display_rpm_put,
249 .put_raw = i915_display_rpm_put_raw,
250 .put_unchecked = i915_display_rpm_put_unchecked,
251 .suspended = i915_display_rpm_suspended,
252 .assert_held = i915_display_rpm_assert_held,
253 .assert_block = i915_display_rpm_assert_block,
254 .assert_unblock = i915_display_rpm_assert_unblock
255 };
256
/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
278
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}
295
/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_active() report failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if ((ignore_usecount &&
		     pm_runtime_get_if_active(rpm->kdev) <= 0) ||
		    (!ignore_usecount &&
		     pm_runtime_get_if_in_use(rpm->kdev) <= 0))
			return NULL; /* device not active, no reference taken */
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
339
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device is in use
 * @rpm: the intel_runtime_pm structure
 *
 * See __intel_runtime_pm_get_if_active(); the reference is only acquired if
 * the device is both active and has a non-zero usage count.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}
344
/**
 * intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 *
 * See __intel_runtime_pm_get_if_active(); the reference is acquired whenever
 * the device is active, regardless of its usage count.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}
349
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference.
 *
 * It will _not_ resume the device but instead only get an extra wakeref.
 * Therefore it is only valid to call this function from contexts where
 * the device is known to be active and with another wakeref previously held.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	/* A raw wakeref must already be held, per the contract above. */
	assert_rpm_raw_wakeref_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
374
/*
 * Common implementation for the reference putters: untrack the cookie, drop
 * the local accounting and release the core runtime pm reference via the
 * autosuspend path.
 */
static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
388
/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}
403
/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	/* The default cookie bypasses per-reference tracking. */
	__intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true);
}
420
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * Only built when wakeref tracking is enabled; otherwise a header-provided
 * variant is presumably used instead — confirm against intel_runtime_pm.h.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif
436
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * FIXME: Temp hammer to keep autosuspend disabled on lmem supported platforms.
	 * As per PCIe specs 5.3.1.4.1, all iomem read write request over a PCIe
	 * function will be unsupported in case PCIe endpoint function is in D3.
	 * Let's keep i915 autosuspend control 'on' till we fix all known issues
	 * with lmem access in D3.
	 */
	if (!IS_DGFX(i915))
		pm_runtime_allow(kdev);

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_runtime_pm_disable().
	 */
	pm_runtime_put_autosuspend(kdev);
}
499
/**
 * intel_runtime_pm_disable - disable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * Undo intel_runtime_pm_enable(): reacquire the RPM reference that was
 * dropped at the end of the enable sequence, handing ownership back to the
 * PM core, and stop using autosuspend.
 */
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	/*
	 * Drop the permanent reference intel_runtime_pm_enable() took on
	 * platforms without RPM support.
	 */
	if (!rpm->available)
		pm_runtime_put(kdev);
}
514
/**
 * intel_runtime_pm_driver_release - verify all wakerefs have been released
 * @rpm: the intel_runtime_pm structure
 *
 * Finalizes the userfault auto-wakeref and warns if any raw wakerefs or
 * wakelocks are still outstanding at driver release time.
 */
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int count = atomic_read(&rpm->wakeref_count);

	intel_wakeref_auto_fini(&rpm->userfault_wakeref);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));
}
527
/*
 * Final teardown: run the release-time leak checks and then drop the
 * wakeref tracking state itself.
 */
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
{
	intel_runtime_pm_driver_release(rpm);
	untrack_all_intel_runtime_pm_wakerefs(rpm);
}
533
/*
 * One-time early setup of the runtime pm state: cache the PCI device,
 * record platform runtime-pm support, zero the wakeref count and initialize
 * wakeref tracking plus the lmem userfault bookkeeping.
 */
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);
	atomic_set(&rpm->wakeref_count, 0);

	init_intel_runtime_pm_wakeref(rpm);
	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
	spin_lock_init(&rpm->lmem_userfault_lock);
	intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
}
549