Lines matching +full:aoss +full:-qmp — all matches below are from
drivers/net/ipa/ipa_power.c; elided context between matches is marked "...".
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */
/**
 * enum ipa_power_flag - IPA power flags
 * ...
 */

/**
 * struct ipa_power - IPA power management information
 * ...
 * @qmp:	QMP handle for AOSS communication
 * ...
 */
	struct qmp *qmp;	/* member of struct ipa_power */
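The flag bits tested and set throughout the matches below live in a bitmap
inside struct ipa_power. A hedged sketch of the declarations, reconstructed
from the flag and field names this file uses (upstream field order and
comments may differ):

#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/spinlock.h>
#include <linux/soc/qcom/qcom_aoss.h>

enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,	/* hardware resume has been triggered */
	IPA_POWER_FLAG_SYSTEM,	/* system (not runtime) suspend in effect */
	IPA_POWER_FLAG_STOPPED,	/* modem TX queue stopped by transmit path */
	IPA_POWER_FLAG_STARTED,	/* modem TX queue restarted by resume path */
	IPA_POWER_FLAG_COUNT,	/* last; not a flag */
};

struct ipa_power {
	struct device *dev;	/* device used for power operations */
	struct clk *core;	/* IPA core clock */
	spinlock_t spinlock;	/* protects the STOPPED/STARTED flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	struct qmp *qmp;	/* QMP handle for AOSS communication */
	u32 interconnect_count;	/* number of elements in interconnect[] */
	struct icc_bulk_data interconnect[];	/* flexible array member */
};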
/* ipa_interconnect_init() */
	interconnect = &power->interconnect[0];
	for (i = 0; i < power->interconnect_count; i++) {
		/* interconnect->path is filled in by of_icc_bulk_get() */
		interconnect->name = data->name;
		interconnect->avg_bw = data->average_bandwidth;
		interconnect->peak_bw = data->peak_bandwidth;
		data++;
		interconnect++;
	}

	ret = of_icc_bulk_get(power->dev, power->interconnect_count,
			      power->interconnect);
	if (ret)
		return ret;

	/* Keep the paths disabled until the hardware is powered up */
	icc_bulk_disable(power->interconnect_count, power->interconnect);

	/* Record the bandwidth to apply whenever the paths are enabled */
	ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
	if (ret)	/* error path */
		icc_bulk_put(power->interconnect_count, power->interconnect);
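The sequence above is the interconnect framework's bulk-consumer pattern:
describe each path in a struct icc_bulk_data, let of_icc_bulk_get() resolve
the paths from the device tree, then program and gate bandwidth as a group.
A minimal hedged sketch for a hypothetical two-path consumer (path names and
bandwidth values are invented; the device's "interconnect-names" DT property
must match the .name fields):

#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/kernel.h>

/* Hypothetical paths; bandwidths are in kbps and purely illustrative */
static struct icc_bulk_data my_paths[] = {
	{ .name = "memory", .avg_bw = 100000, .peak_bw = 600000 },
	{ .name = "config", .avg_bw = 2000,   .peak_bw = 4000 },
};

static int my_icc_init(struct device *dev)
{
	int ret;

	/* Resolve both paths from the device's DT node */
	ret = of_icc_bulk_get(dev, ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		return ret;

	/* Keep the paths gated until the hardware is powered up */
	icc_bulk_disable(ARRAY_SIZE(my_paths), my_paths);

	/* Record the bandwidth to apply when the paths are enabled */
	ret = icc_bulk_set_bw(ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		icc_bulk_put(ARRAY_SIZE(my_paths), my_paths);

	return ret;
}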
/* ipa_interconnect_exit() */
	icc_bulk_put(power->interconnect_count, power->interconnect);
/* ipa_power_enable() */
	struct ipa_power *power = ipa->power;
	int ret;

	ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
	if (ret)
		return ret;

	ret = clk_prepare_enable(power->core);
	if (ret) {
		dev_err(power->dev, "error %d enabling core clock\n", ret);
		icc_bulk_disable(power->interconnect_count,
				 power->interconnect);
	}
/* ipa_power_disable(): tear down in the reverse order of ipa_power_enable() */
	struct ipa_power *power = ipa->power;

	clk_disable_unprepare(power->core);

	icc_bulk_disable(power->interconnect_count, power->interconnect);
/* ipa_runtime_suspend() */
	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
		/* ... */
		gsi_suspend(&ipa->gsi);
	}
/* ipa_runtime_resume() */
	if (ipa->setup_complete) {
		gsi_resume(&ipa->gsi);
		/* ... */
	}
/* ipa_suspend(): system suspend */
	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	/* ...
	 * be re-enabled until the matching _enable call in
	 * ... */
/* ipa_resume(): system resume */
	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
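These suspend/resume callbacks take effect only once referenced from a
struct dev_pm_ops attached to the platform driver; upstream ipa_power.c
exports one along these lines (exact contents vary by kernel version):

const struct dev_pm_ops ipa_pm_ops = {
	.suspend		= ipa_suspend,
	.resume			= ipa_resume,
	.runtime_suspend	= ipa_runtime_suspend,
	.runtime_resume		= ipa_runtime_resume,
};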
/* ipa_core_clock_rate() */
	return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
/* ipa_power_suspend_handler() */
	/* If this is the first suspend interrupt since we resumed, and a
	 * system suspend is underway, report a wakeup event to abort it.
	 */
	if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
		if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
			pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* Acknowledge/clear the suspend interrupt on all endpoints */
	ipa_interrupt_suspend_clear_all(ipa->interrupt);
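pm_wakeup_dev_event() is the generic wakeup-event report from
<linux/pm_wakeup.h>: a zero msec means no timeout, and hard = true aborts
any system suspend in progress. It only has effect if the device was made
wakeup-capable beforehand, which ipa_power_setup() below arranges with
device_init_wakeup(). A minimal hedged sketch of the pairing:

#include <linux/pm_wakeup.h>

/* At setup time: mark the device wakeup-capable and enable wakeup */
static int my_wakeup_setup(struct device *dev)
{
	return device_init_wakeup(dev, true);
}

/* From interrupt context: report a "hard" wakeup event (msec = 0 means
 * no timeout) so that a system suspend in progress is aborted.
 */
static void my_wakeup_report(struct device *dev)
{
	pm_wakeup_dev_event(dev, 0, true);
}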
/* Comment block preceding the modem queue helpers (partly elided):
 * ...
 * from the power ->runtime_resume operation.
 */
/* ipa_power_modem_queue_stop() */
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* Only stop the queue if the wake path hasn't already restarted it */
	if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
		netif_stop_queue(ipa->modem_netdev);
		__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
/* ipa_power_modem_queue_wake() */
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* Only wake the queue if the stop path actually stopped it */
	if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
		__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
		netif_wake_queue(ipa->modem_netdev);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
/* ipa_power_modem_queue_active() */
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
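Together these three helpers close a race between the transmit path and
runtime resume: without the STOPPED/STARTED handshake, a resume completing
between the decision to stop the queue and netif_stop_queue() could leave
the queue stopped with nothing left to wake it. A hedged sketch of the
calling pattern (the my_* callers are illustrative, not the upstream call
sites):

/* Transmit path, power not yet up: park the queue; the stop is skipped
 * automatically if the resume path already restarted it (STARTED set).
 */
static void my_tx_power_down_path(struct ipa *ipa)
{
	ipa_power_modem_queue_stop(ipa);
	/* ... retry transmission after runtime resume completes ... */
}

/* Runtime-resume path: restart the queue only if the transmit path
 * actually stopped it (STOPPED set), recording that via STARTED.
 */
static void my_resume_path(struct ipa *ipa)
{
	ipa_power_modem_queue_wake(ipa);
}

/* Transmit path with power up: traffic is flowing, so clear STARTED */
static void my_tx_active_path(struct ipa *ipa)
{
	ipa_power_modem_queue_active(ipa);
}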
/* ipa_power_retention_init() */
	struct qmp *qmp = qmp_get(power->dev);

	if (IS_ERR(qmp)) {
		if (PTR_ERR(qmp) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* Any other error means the QMP mailbox isn't needed here */
		qmp = NULL;
	}
	power->qmp = qmp;
/* ipa_power_retention_exit() */
	qmp_put(power->qmp);
	power->qmp = NULL;
/* ipa_power_retention(): ask the AOSS to enable/disable register retention */
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	ret = qmp_send(power->qmp, fmt, enable ? '1' : '0');
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
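qmp_get(), qmp_send() and qmp_put() come from the Qualcomm AOSS QMP driver
(<linux/soc/qcom/qcom_aoss.h>); in recent kernels qmp_send() takes a
printf-style format string, as the call above shows. A hedged standalone
sketch of the sequence (it assumes the device node carries a qcom,qmp
phandle; the message template is the one this file sends):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/qcom_aoss.h>

static int my_retention_set(struct device *dev, bool enable)
{
	struct qmp *qmp;
	int ret;

	/* Resolves the device's "qcom,qmp" phandle to the AOSS mailbox */
	qmp = qmp_get(dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);	/* may be -EPROBE_DEFER */

	/* Same message ipa_power_retention() sends for power collapse */
	ret = qmp_send(qmp, "{ class: bcm, res: ipa_pc, val: %c }",
		       enable ? '1' : '0');

	qmp_put(qmp);

	return ret;
}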
/* ipa_power_setup() */
	ret = device_init_wakeup(&ipa->pdev->dev, true);
/* ipa_power_teardown() */
	(void)device_init_wakeup(&ipa->pdev->dev, false);
/* ipa_power_init() */
	ret = clk_set_rate(clk, data->core_clock_rate);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %u\n",
			ret, data->core_clock_rate);
		/* ... */
	}

	size = struct_size(power, interconnect, data->interconnect_count);
	power = kzalloc(size, GFP_KERNEL);
	if (!power) {
		ret = -ENOMEM;
		/* ... */
	}
	power->dev = dev;
	power->core = clk;
	spin_lock_init(&power->spinlock);
	power->interconnect_count = data->interconnect_count;

	ret = ipa_interconnect_init(power, data->interconnect_data);
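struct_size() (from <linux/overflow.h>) sizes a structure with a trailing
flexible array: sizeof(*power) plus that many interconnect[] elements,
saturating rather than wrapping on overflow so the allocation fails cleanly.
A minimal hedged sketch of the pattern used above:

#include <linux/interconnect.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct my_power {
	u32 interconnect_count;
	struct icc_bulk_data interconnect[];	/* flexible array member */
};

static struct my_power *my_power_alloc(u32 count)
{
	struct my_power *power;

	/* sizeof(*power) + count * sizeof(power->interconnect[0]),
	 * with overflow checking built in
	 */
	power = kzalloc(struct_size(power, interconnect, count), GFP_KERNEL);
	if (!power)
		return NULL;

	power->interconnect_count = count;

	return power;
}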
/* ipa_power_exit() */
	struct device *dev = power->dev;
	struct clk *clk = power->core;