1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
4 *
5 * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/device.h>
10 #include <linux/io.h>
11 #include <linux/pm.h>
12 #include <linux/pm_clock.h>
13 #include <linux/clk.h>
14 #include <linux/clkdev.h>
15 #include <linux/of_clk.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/pm_domain.h>
19 #include <linux/pm_runtime.h>
20
21 #ifdef CONFIG_PM_CLK
22
/*
 * Life-cycle states of a PM clock entry; see pm_clk_acquire() and
 * __pm_clk_enable() for the transitions.
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* no clock handle acquired yet */
	PCE_STATUS_ACQUIRED,	/* clk_get() done, prepare deferred */
	PCE_STATUS_PREPARED,	/* clock prepared but not enabled */
	PCE_STATUS_ENABLED,	/* clock prepared and enabled */
	PCE_STATUS_ERROR,	/* clk_get() or clk_prepare() failed */
};
30
/**
 * struct pm_clock_entry - One clock managed for a device's power management.
 * @node: Link in the device's pm_subsys_data clock_list.
 * @con_id: Connection ID the clock was looked up with (kstrdup'd), or NULL.
 * @clk: The clock handle (may hold an ERR_PTR after a failed clk_get()).
 * @status: Current state of the clock, see enum pce_status.
 * @enabled_when_prepared: Set when clk_is_enabled_when_prepared() is true for
 *	@clk; such entries are counted in pm_subsys_data's clock_op_might_sleep
 *	because operating on them may sleep.
 */
struct pm_clock_entry {
	struct list_head node;
	char *con_id;
	struct clk *clk;
	enum pce_status status;
	bool enabled_when_prepared;
};
38
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 *		      entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock_op_might_sleep count to be modified.
 *
 * Get exclusive access before modifying the PM clock entry list and the
 * clock_op_might_sleep count to guard against concurrent modifications.
 * This also protects against a concurrent clock_op_might_sleep and PM clock
 * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
 * happen in atomic context, hence both the mutex and the spinlock must be
 * taken here.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}
58
/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_list_lock().
 *
 * Drop the spinlock and the mutex in the reverse of the order in which
 * pm_clk_list_lock() took them.
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}
70
71 /**
72 * pm_clk_op_lock - ensure exclusive access for performing clock operations.
73 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
74 * and clk_op_might_sleep count being used.
75 * @flags: stored irq flags.
76 * @fn: string for the caller function's name.
77 *
78 * This is used by pm_clk_suspend() and pm_clk_resume() to guard
79 * against concurrent modifications to the clock entry list and the
80 * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
81 * only the mutex can be locked and those functions can only be used in
82 * non atomic context. If clock_op_might_sleep == 0 then these functions
83 * may be used in any context and only the spinlock can be locked.
84 * Returns -EINVAL if called in atomic context when clock ops might sleep.
85 */
pm_clk_op_lock(struct pm_subsys_data * psd,unsigned long * flags,const char * fn)86 static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
87 const char *fn)
88 /* sparse annotations don't work here as exit state isn't static */
89 {
90 bool atomic_context = in_atomic() || irqs_disabled();
91
92 try_again:
93 spin_lock_irqsave(&psd->lock, *flags);
94 if (!psd->clock_op_might_sleep) {
95 /* the __release is there to work around sparse limitations */
96 __release(&psd->lock);
97 return 0;
98 }
99
100 /* bail out if in atomic context */
101 if (atomic_context) {
102 pr_err("%s: atomic context with clock_ops_might_sleep = %d",
103 fn, psd->clock_op_might_sleep);
104 spin_unlock_irqrestore(&psd->lock, *flags);
105 might_sleep();
106 return -EPERM;
107 }
108
109 /* we must switch to the mutex */
110 spin_unlock_irqrestore(&psd->lock, *flags);
111 mutex_lock(&psd->clock_mutex);
112
113 /*
114 * There was a possibility for psd->clock_op_might_sleep
115 * to become 0 above. Keep the mutex only if not the case.
116 */
117 if (likely(psd->clock_op_might_sleep))
118 return 0;
119
120 mutex_unlock(&psd->clock_mutex);
121 goto try_again;
122 }
123
/**
 * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_op_lock().
 * @flags: irq flags provided by pm_clk_op_lock().
 *
 * Drops whichever lock pm_clk_op_lock() ended up holding: the mutex when
 * clock_op_might_sleep is non-zero, the spinlock otherwise.
 */
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
	/* sparse annotations don't work here as entry state isn't static */
{
	if (psd->clock_op_might_sleep) {
		mutex_unlock(&psd->clock_mutex);
	} else {
		/* the __acquire is there to work around sparse limitations */
		__acquire(&psd->lock);
		spin_unlock_irqrestore(&psd->lock, *flags);
	}
}
141
142 /**
143 * __pm_clk_enable - Enable a clock, reporting any errors
144 * @dev: The device for the given clock
145 * @ce: PM clock entry corresponding to the clock.
146 */
__pm_clk_enable(struct device * dev,struct pm_clock_entry * ce)147 static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
148 {
149 int ret;
150
151 switch (ce->status) {
152 case PCE_STATUS_ACQUIRED:
153 ret = clk_prepare_enable(ce->clk);
154 break;
155 case PCE_STATUS_PREPARED:
156 ret = clk_enable(ce->clk);
157 break;
158 default:
159 return;
160 }
161 if (!ret)
162 ce->status = PCE_STATUS_ENABLED;
163 else
164 dev_err(dev, "%s: failed to enable clk %p, error %d\n",
165 __func__, ce->clk, ret);
166 }
167
168 /**
169 * pm_clk_acquire - Acquire a device clock.
170 * @dev: Device whose clock is to be acquired.
171 * @ce: PM clock entry corresponding to the clock.
172 */
pm_clk_acquire(struct device * dev,struct pm_clock_entry * ce)173 static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
174 {
175 if (!ce->clk)
176 ce->clk = clk_get(dev, ce->con_id);
177 if (IS_ERR(ce->clk)) {
178 ce->status = PCE_STATUS_ERROR;
179 return;
180 } else if (clk_is_enabled_when_prepared(ce->clk)) {
181 /* we defer preparing the clock in that case */
182 ce->status = PCE_STATUS_ACQUIRED;
183 ce->enabled_when_prepared = true;
184 } else if (clk_prepare(ce->clk)) {
185 ce->status = PCE_STATUS_ERROR;
186 dev_err(dev, "clk_prepare() failed\n");
187 return;
188 } else {
189 ce->status = PCE_STATUS_PREPARED;
190 }
191 dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
192 ce->clk, ce->con_id);
193 }
194
__pm_clk_add(struct device * dev,const char * con_id,struct clk * clk)195 static int __pm_clk_add(struct device *dev, const char *con_id,
196 struct clk *clk)
197 {
198 struct pm_subsys_data *psd = dev_to_psd(dev);
199 struct pm_clock_entry *ce;
200
201 if (!psd)
202 return -EINVAL;
203
204 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
205 if (!ce)
206 return -ENOMEM;
207
208 if (con_id) {
209 ce->con_id = kstrdup(con_id, GFP_KERNEL);
210 if (!ce->con_id) {
211 kfree(ce);
212 return -ENOMEM;
213 }
214 } else {
215 if (IS_ERR(clk)) {
216 kfree(ce);
217 return -ENOENT;
218 }
219 ce->clk = clk;
220 }
221
222 pm_clk_acquire(dev, ce);
223
224 pm_clk_list_lock(psd);
225 list_add_tail(&ce->node, &psd->clock_list);
226 if (ce->enabled_when_prepared)
227 psd->clock_op_might_sleep++;
228 pm_clk_list_unlock(psd);
229 return 0;
230 }
231
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock.
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
245
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * The power-management code will take control of the clock reference, so
 * callers should not call clk_put() on @clk after this function successfully
 * returned.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
261
262 /**
263 * of_pm_clk_add_clks - Start using device clock(s) for power management.
264 * @dev: Device whose clock(s) is going to be used for power management.
265 *
266 * Add a series of clocks described in the 'clocks' device-tree node for
267 * a device to the list of clocks used for the power management of @dev.
268 * On success, returns the number of clocks added. Returns a negative
269 * error code if there are no clocks in the device node for the device
270 * or if adding a clock fails.
271 */
of_pm_clk_add_clks(struct device * dev)272 int of_pm_clk_add_clks(struct device *dev)
273 {
274 struct clk **clks;
275 int i, count;
276 int ret;
277
278 if (!dev || !dev->of_node)
279 return -EINVAL;
280
281 count = of_clk_get_parent_count(dev->of_node);
282 if (count <= 0)
283 return -ENODEV;
284
285 clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
286 if (!clks)
287 return -ENOMEM;
288
289 for (i = 0; i < count; i++) {
290 clks[i] = of_clk_get(dev->of_node, i);
291 if (IS_ERR(clks[i])) {
292 ret = PTR_ERR(clks[i]);
293 goto error;
294 }
295
296 ret = pm_clk_add_clk(dev, clks[i]);
297 if (ret) {
298 clk_put(clks[i]);
299 goto error;
300 }
301 }
302
303 kfree(clks);
304
305 return i;
306
307 error:
308 while (i--)
309 pm_clk_remove_clk(dev, clks[i]);
310
311 kfree(clks);
312
313 return ret;
314 }
315 EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
316
317 /**
318 * __pm_clk_remove - Destroy PM clock entry.
319 * @ce: PM clock entry to destroy.
320 */
__pm_clk_remove(struct pm_clock_entry * ce)321 static void __pm_clk_remove(struct pm_clock_entry *ce)
322 {
323 if (!ce)
324 return;
325
326 switch (ce->status) {
327 case PCE_STATUS_ENABLED:
328 clk_disable(ce->clk);
329 fallthrough;
330 case PCE_STATUS_PREPARED:
331 clk_unprepare(ce->clk);
332 fallthrough;
333 case PCE_STATUS_ACQUIRED:
334 case PCE_STATUS_ERROR:
335 if (!IS_ERR(ce->clk))
336 clk_put(ce->clk);
337 break;
338 default:
339 break;
340 }
341
342 kfree(ce->con_id);
343 kfree(ce);
344 }
345
346 /**
347 * pm_clk_remove_clk - Stop using a device clock for power management.
348 * @dev: Device whose clock should not be used for PM any more.
349 * @clk: Clock pointer
350 *
351 * Remove the clock pointed to by @clk from the list of clocks used for
352 * the power management of @dev.
353 */
pm_clk_remove_clk(struct device * dev,struct clk * clk)354 void pm_clk_remove_clk(struct device *dev, struct clk *clk)
355 {
356 struct pm_subsys_data *psd = dev_to_psd(dev);
357 struct pm_clock_entry *ce;
358
359 if (!psd || !clk)
360 return;
361
362 pm_clk_list_lock(psd);
363
364 list_for_each_entry(ce, &psd->clock_list, node) {
365 if (clk == ce->clk)
366 goto remove;
367 }
368
369 pm_clk_list_unlock(psd);
370 return;
371
372 remove:
373 list_del(&ce->node);
374 if (ce->enabled_when_prepared)
375 psd->clock_op_might_sleep--;
376 pm_clk_list_unlock(psd);
377
378 __pm_clk_remove(ce);
379 }
380 EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
381
382 /**
383 * pm_clk_init - Initialize a device's list of power management clocks.
384 * @dev: Device to initialize the list of PM clocks for.
385 *
386 * Initialize the lock and clock_list members of the device's pm_subsys_data
387 * object, set the count of clocks that might sleep to 0.
388 */
pm_clk_init(struct device * dev)389 void pm_clk_init(struct device *dev)
390 {
391 struct pm_subsys_data *psd = dev_to_psd(dev);
392 if (psd) {
393 INIT_LIST_HEAD(&psd->clock_list);
394 mutex_init(&psd->clock_mutex);
395 psd->clock_op_might_sleep = 0;
396 }
397 }
398 EXPORT_SYMBOL_GPL(pm_clk_init);
399
/**
 * pm_clk_create - Create and initialize a device's list of PM clocks.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
 * members and make the @dev's power.subsys_data field point to it.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
412
/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list of PM clocks for.
 *
 * Clear the @dev's power.subsys_data field, remove the list of clock entries
 * from the struct pm_subsys_data object pointed to by it before and free
 * that object.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	pm_clk_list_lock(psd);

	/*
	 * Detach all entries onto a private list while holding the locks, so
	 * the clock operations needed to destroy them can run after the locks
	 * have been dropped.
	 */
	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);
	psd->clock_op_might_sleep = 0;

	pm_clk_list_unlock(psd);

	dev_pm_put_subsys_data(dev);

	/* now tear the detached entries down, newest first */
	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
448
/* devm release action: tear down the PM clock list of the device @data. */
static void pm_clk_destroy_action(void *data)
{
	pm_clk_destroy(data);
}
453
/**
 * devm_pm_clk_create - Resource-managed pm_clk_create().
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Like pm_clk_create(), but pm_clk_destroy() is scheduled to run
 * automatically when @dev is released (or on registration failure).
 *
 * Return: 0 on success or a negative error code on failure.
 */
int devm_pm_clk_create(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_clk_create);
465
466 /**
467 * pm_clk_suspend - Disable clocks in a device's PM clock list.
468 * @dev: Device to disable the clocks for.
469 */
pm_clk_suspend(struct device * dev)470 int pm_clk_suspend(struct device *dev)
471 {
472 struct pm_subsys_data *psd = dev_to_psd(dev);
473 struct pm_clock_entry *ce;
474 unsigned long flags;
475 int ret;
476
477 dev_dbg(dev, "%s()\n", __func__);
478
479 if (!psd)
480 return 0;
481
482 ret = pm_clk_op_lock(psd, &flags, __func__);
483 if (ret)
484 return ret;
485
486 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
487 if (ce->status == PCE_STATUS_ENABLED) {
488 if (ce->enabled_when_prepared) {
489 clk_disable_unprepare(ce->clk);
490 ce->status = PCE_STATUS_ACQUIRED;
491 } else {
492 clk_disable(ce->clk);
493 ce->status = PCE_STATUS_PREPARED;
494 }
495 }
496 }
497
498 pm_clk_op_unlock(psd, &flags);
499
500 return 0;
501 }
502 EXPORT_SYMBOL_GPL(pm_clk_suspend);
503
504 /**
505 * pm_clk_resume - Enable clocks in a device's PM clock list.
506 * @dev: Device to enable the clocks for.
507 */
pm_clk_resume(struct device * dev)508 int pm_clk_resume(struct device *dev)
509 {
510 struct pm_subsys_data *psd = dev_to_psd(dev);
511 struct pm_clock_entry *ce;
512 unsigned long flags;
513 int ret;
514
515 dev_dbg(dev, "%s()\n", __func__);
516
517 if (!psd)
518 return 0;
519
520 ret = pm_clk_op_lock(psd, &flags, __func__);
521 if (ret)
522 return ret;
523
524 list_for_each_entry(ce, &psd->clock_list, node)
525 __pm_clk_enable(dev, ce);
526
527 pm_clk_op_unlock(psd, &flags);
528
529 return 0;
530 }
531 EXPORT_SYMBOL_GPL(pm_clk_resume);
532
533 /**
534 * pm_clk_notify - Notify routine for device addition and removal.
535 * @nb: Notifier block object this function is a member of.
536 * @action: Operation being carried out by the caller.
537 * @data: Device the routine is being run for.
538 *
539 * For this function to work, @nb must be a member of an object of type
540 * struct pm_clk_notifier_block containing all of the requisite data.
541 * Specifically, the pm_domain member of that object is copied to the device's
542 * pm_domain field and its con_ids member is used to populate the device's list
543 * of PM clocks, depending on @action.
544 *
545 * If the device's pm_domain field is already populated with a value different
546 * from the one stored in the struct pm_clk_notifier_block object, the function
547 * does nothing.
548 */
pm_clk_notify(struct notifier_block * nb,unsigned long action,void * data)549 static int pm_clk_notify(struct notifier_block *nb,
550 unsigned long action, void *data)
551 {
552 struct pm_clk_notifier_block *clknb;
553 struct device *dev = data;
554 char **con_id;
555 int error;
556
557 dev_dbg(dev, "%s() %ld\n", __func__, action);
558
559 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
560
561 switch (action) {
562 case BUS_NOTIFY_ADD_DEVICE:
563 if (dev->pm_domain)
564 break;
565
566 error = pm_clk_create(dev);
567 if (error)
568 break;
569
570 dev_pm_domain_set(dev, clknb->pm_domain);
571 if (clknb->con_ids[0]) {
572 for (con_id = clknb->con_ids; *con_id; con_id++)
573 pm_clk_add(dev, *con_id);
574 } else {
575 pm_clk_add(dev, NULL);
576 }
577
578 break;
579 case BUS_NOTIFY_DEL_DEVICE:
580 if (dev->pm_domain != clknb->pm_domain)
581 break;
582
583 dev_pm_domain_set(dev, NULL);
584 pm_clk_destroy(dev);
585 break;
586 }
587
588 return 0;
589 }
590
/*
 * pm_clk_runtime_suspend - Generic runtime-suspend callback using PM clocks.
 * @dev: Device to suspend.
 *
 * Runs the generic runtime suspend first and then gates the device's PM
 * clocks; on clock failure the generic suspend is rolled back. Returns 0
 * on success or a negative error code.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error) {
		dev_err(dev, "failed to suspend device\n");
		return error;
	}

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	dev_err(dev, "failed to suspend clock\n");
	/* undo the generic suspend so the device stays usable */
	pm_generic_runtime_resume(dev);
	return error;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
613
/*
 * pm_clk_runtime_resume - Generic runtime-resume callback using PM clocks.
 * @dev: Device to resume.
 *
 * Ungates the device's PM clocks before running the generic runtime resume.
 * Returns 0 on success or a negative error code.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_clk_resume(dev);
	if (error) {
		dev_err(dev, "failed to resume clock\n");
		return error;
	}

	return pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
629
630 #else /* !CONFIG_PM_CLK */
631
/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: lookup failures are silently ignored.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_prepare_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
648
/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: lookup failures are silently ignored.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_disable_unprepare(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
665
666 /**
667 * pm_clk_notify - Notify routine for device addition and removal.
668 * @nb: Notifier block object this function is a member of.
669 * @action: Operation being carried out by the caller.
670 * @data: Device the routine is being run for.
671 *
672 * For this function to work, @nb must be a member of an object of type
673 * struct pm_clk_notifier_block containing all of the requisite data.
674 * Specifically, the con_ids member of that object is used to enable or disable
675 * the device's clocks, depending on @action.
676 */
pm_clk_notify(struct notifier_block * nb,unsigned long action,void * data)677 static int pm_clk_notify(struct notifier_block *nb,
678 unsigned long action, void *data)
679 {
680 struct pm_clk_notifier_block *clknb;
681 struct device *dev = data;
682 char **con_id;
683
684 dev_dbg(dev, "%s() %ld\n", __func__, action);
685
686 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
687
688 switch (action) {
689 case BUS_NOTIFY_BIND_DRIVER:
690 if (clknb->con_ids[0]) {
691 for (con_id = clknb->con_ids; *con_id; con_id++)
692 enable_clock(dev, *con_id);
693 } else {
694 enable_clock(dev, NULL);
695 }
696 break;
697 case BUS_NOTIFY_DRIVER_NOT_BOUND:
698 case BUS_NOTIFY_UNBOUND_DRIVER:
699 if (clknb->con_ids[0]) {
700 for (con_id = clknb->con_ids; *con_id; con_id++)
701 disable_clock(dev, *con_id);
702 } else {
703 disable_clock(dev, NULL);
704 }
705 break;
706 }
707
708 return 0;
709 }
710
711 #endif /* !CONFIG_PM_CLK */
712
/**
 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be initialized and its
 * notifier_call member will be replaced with pm_clk_notify(). However,
 * the remaining members of @clknb should be populated prior to calling this
 * routine.
 */
void pm_clk_add_notifier(const struct bus_type *bus,
			 struct pm_clk_notifier_block *clknb)
{
	if (!bus || !clknb)
		return;

	clknb->nb.notifier_call = pm_clk_notify;
	/* NOTE(review): bus_register_notifier() result is ignored here —
	 * presumably registration is treated as best effort.
	 */
	bus_register_notifier(bus, &clknb->nb);
}
EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
733