Lines Matching +full:ipc +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
3 // Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
9 #include <linux/io-64-nonatomic-lo-hi.h>
23 struct avs_ipc *ipc = adev->ipc; in avs_dsp_set_d0ix() local
27 if (ipc->in_d0ix == enable) in avs_dsp_set_d0ix()
32 /* Prevent further d0ix attempts on deliberate IPC failure. */ in avs_dsp_set_d0ix()
33 if (ret == -AVS_EIPC) in avs_dsp_set_d0ix()
34 atomic_inc(&ipc->d0ix_disable_depth); in avs_dsp_set_d0ix()
36 ipc->in_d0ix = false; in avs_dsp_set_d0ix()
40 ipc->in_d0ix = enable; in avs_dsp_set_d0ix()
46 if (atomic_read(&adev->ipc->d0ix_disable_depth)) in avs_dsp_schedule_d0ix()
49 mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work, in avs_dsp_schedule_d0ix()
55 struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work); in avs_dsp_d0ix_work() local
57 avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true); in avs_dsp_d0ix_work()
62 struct avs_ipc *ipc = adev->ipc; in avs_dsp_wake_d0i0() local
64 if (!atomic_read(&ipc->d0ix_disable_depth)) { in avs_dsp_wake_d0i0()
65 cancel_delayed_work_sync(&ipc->d0ix_work); in avs_dsp_wake_d0i0()
74 struct avs_ipc *ipc = adev->ipc; in avs_dsp_disable_d0ix() local
77 if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) { in avs_dsp_disable_d0ix()
78 cancel_delayed_work_sync(&ipc->d0ix_work); in avs_dsp_disable_d0ix()
87 struct avs_ipc *ipc = adev->ipc; in avs_dsp_enable_d0ix() local
89 if (atomic_dec_and_test(&ipc->d0ix_disable_depth)) in avs_dsp_enable_d0ix()
90 queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work, in avs_dsp_enable_d0ix()
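
These fragments line up with the avs driver's IPC code (sound/soc/intel/avs/ipc.c in the kernel tree). The block above shows how D0ix entry is gated by a depth counter: avs_dsp_disable_d0ix() cancels any scheduled D0ix work on the first increment, and avs_dsp_enable_d0ix() re-arms the delayed work once the last user drops the count. A minimal sketch of that refcount pattern follows; the names my_ctx and MY_D0IX_DELAY_MS are invented, and the real delay constant is not visible in this listing.

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_D0IX_DELAY_MS 3000	/* placeholder; the driver's value is not shown here */

struct my_ctx {
	atomic_t disable_depth;
	struct delayed_work d0ix_work;
};

static void my_disable_d0ix(struct my_ctx *ctx)
{
	/* First user to disable D0ix also cancels any entry already scheduled. */
	if (atomic_inc_return(&ctx->disable_depth) == 1)
		cancel_delayed_work_sync(&ctx->d0ix_work);
}

static void my_enable_d0ix(struct my_ctx *ctx)
{
	/* Last user to re-enable arms the delayed D0ix entry again. */
	if (atomic_dec_and_test(&ctx->disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ctx->d0ix_work,
				   msecs_to_jiffies(MY_D0IX_DELAY_MS));
}
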
101 mutex_lock(&adev->comp_list_mutex); in avs_dsp_recovery()
103 list_for_each_entry(acomp, &adev->comp_list, node) { in avs_dsp_recovery()
107 card = acomp->base.card; in avs_dsp_recovery()
115 pcm = rtd->pcm; in avs_dsp_recovery()
116 if (!pcm || rtd->dai_link->no_pcm) in avs_dsp_recovery()
122 substream = pcm->streams[dir].substream; in avs_dsp_recovery()
123 if (!substream || !substream->runtime) in avs_dsp_recovery()
133 mutex_unlock(&adev->comp_list_mutex); in avs_dsp_recovery()
136 core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0); in avs_dsp_recovery()
142 dev_err(adev->dev, "dsp reboot failed: %d\n", ret); in avs_dsp_recovery()
144 pm_runtime_mark_last_busy(adev->dev); in avs_dsp_recovery()
145 pm_runtime_enable(adev->dev); in avs_dsp_recovery()
146 pm_request_autosuspend(adev->dev); in avs_dsp_recovery()
148 atomic_set(&adev->ipc->recovering, 0); in avs_dsp_recovery()
153 struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work); in avs_dsp_recovery_work() local
155 avs_dsp_recovery(to_avs_dev(ipc->dev)); in avs_dsp_recovery_work()
160 struct avs_ipc *ipc = adev->ipc; in avs_dsp_exception_caught() local
162 /* Account for the double-exception case. */ in avs_dsp_exception_caught()
163 ipc->ready = false; in avs_dsp_exception_caught()
165 if (!atomic_add_unless(&ipc->recovering, 1, 1)) { in avs_dsp_exception_caught()
166 dev_err(adev->dev, "dsp recovery is already in progress\n"); in avs_dsp_exception_caught()
170 dev_crit(adev->dev, "communication severed, rebooting dsp..\n"); in avs_dsp_exception_caught()
172 cancel_delayed_work_sync(&ipc->d0ix_work); in avs_dsp_exception_caught()
173 ipc->in_d0ix = false; in avs_dsp_exception_caught()
174 /* Re-enabled on recovery completion. */ in avs_dsp_exception_caught()
175 pm_runtime_disable(adev->dev); in avs_dsp_exception_caught()
180 schedule_work(&ipc->recovery_work); in avs_dsp_exception_caught()
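
avs_dsp_exception_caught() uses atomic_add_unless() as a single-entry gate so only one recovery runs at a time, drops ipc->ready up front to cover the double-exception case, and disables runtime PM before scheduling the recovery work; avs_dsp_recovery() re-enables runtime PM and clears the flag with atomic_set() when it finishes. A small sketch of the gate, with generic names:

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch of the "only one recovery at a time" gate (names are mine).
 * Returns true only for the caller that wins the race; everyone else
 * sees the counter already at 1 and backs off. */
static bool my_try_start_recovery(atomic_t *recovering)
{
	return atomic_add_unless(recovering, 1, 1);
}

/* The recovery path itself ends with atomic_set(recovering, 0), as in
 * avs_dsp_recovery() above, which re-opens the gate. */
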
185 struct avs_ipc *ipc = adev->ipc; in avs_dsp_receive_rx() local
192 ipc->rx.header = header; in avs_dsp_receive_rx()
198 ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE, in avs_dsp_receive_rx()
201 memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size); in avs_dsp_receive_rx()
202 trace_avs_msg_payload(ipc->rx.data, ipc->rx.size); in avs_dsp_receive_rx()
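
avs_dsp_receive_rx() stores the reply header, clamps the reported payload size to AVS_MAILBOX_SIZE, and copies the payload out of the uplink window with memcpy_fromio(). A standalone illustration of that clamp-then-copy step; the helper is invented and the size 4096 is a placeholder, not the driver's AVS_MAILBOX_SIZE:

#include <linux/io.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Toy version of the bounded mailbox copy above; names are invented. */
static u32 example_copy_reply(void *dst, void __iomem *uplink, u32 reported_size)
{
	u32 size = min_t(u32, 4096, reported_size);

	memcpy_fromio(dst, uplink, size);
	return size;
}
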
218 if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) { in avs_dsp_process_notification()
219 dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary); in avs_dsp_process_notification()
247 dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary); in avs_dsp_process_notification()
260 /* Perform notification-specific operations. */ in avs_dsp_process_notification()
263 dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary); in avs_dsp_process_notification()
264 adev->ipc->ready = true; in avs_dsp_process_notification()
265 complete(&adev->fw_ready); in avs_dsp_process_notification()
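
The FW_READY notification flips adev->ipc->ready and completes adev->fw_ready. A hedged sketch of how a boot path could pair with that completion; the helper name and timeout policy are mine, only the fw_ready completion itself is visible above:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include "avs.h"	/* driver-local header for struct avs_dev (assumed) */

/* Hypothetical helper, not part of the listing. */
static int example_wait_fw_ready(struct avs_dev *adev, unsigned long timeout_ms)
{
	if (!wait_for_completion_timeout(&adev->fw_ready,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}
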
285 struct avs_ipc *ipc = adev->ipc; in avs_dsp_process_response() local
288 * Response may either be solicited - a reply for a request that has in avs_dsp_process_response()
289 * been sent beforehand - or unsolicited (notification). in avs_dsp_process_response()
293 spin_lock_irq(&ipc->rx_lock); in avs_dsp_process_response()
295 ipc->rx_completed = true; in avs_dsp_process_response()
296 spin_unlock_irq(&ipc->rx_lock); in avs_dsp_process_response()
301 complete(&ipc->busy_completion); in avs_dsp_process_response()
307 struct avs_ipc *ipc = adev->ipc; in avs_dsp_irq_handler() local
327 complete(&ipc->done_completion); in avs_dsp_irq_handler()
379 static bool avs_ipc_is_busy(struct avs_ipc *ipc) in avs_ipc_is_busy() argument
381 struct avs_dev *adev = to_avs_dev(ipc->dev); in avs_ipc_is_busy()
388 static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout) in avs_ipc_wait_busy_completion() argument
394 ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout)); in avs_ipc_wait_busy_completion()
397 if (!ipc->ready) in avs_ipc_wait_busy_completion()
398 return -EPERM; in avs_ipc_wait_busy_completion()
401 if (!avs_ipc_is_busy(ipc)) in avs_ipc_wait_busy_completion()
402 return -ETIMEDOUT; in avs_ipc_wait_busy_completion()
405 * has been received - now wait until it's processed. in avs_ipc_wait_busy_completion()
407 wait_for_completion_killable(&ipc->busy_completion); in avs_ipc_wait_busy_completion()
410 /* Ongoing notification's bottom-half may cause early wakeup */ in avs_ipc_wait_busy_completion()
411 spin_lock(&ipc->rx_lock); in avs_ipc_wait_busy_completion()
412 if (!ipc->rx_completed) { in avs_ipc_wait_busy_completion()
415 repeats_left--; in avs_ipc_wait_busy_completion()
416 reinit_completion(&ipc->busy_completion); in avs_ipc_wait_busy_completion()
417 spin_unlock(&ipc->rx_lock); in avs_ipc_wait_busy_completion()
421 spin_unlock(&ipc->rx_lock); in avs_ipc_wait_busy_completion()
422 return -ETIMEDOUT; in avs_ipc_wait_busy_completion()
425 spin_unlock(&ipc->rx_lock); in avs_ipc_wait_busy_completion()
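
avs_ipc_wait_busy_completion() layers two waits: a timed wait first, then, if that expires while avs_ipc_is_busy() still reports a pending message, a killable wait, because the reply has arrived and only its processing is outstanding. The rx_completed check under rx_lock handles a notification's bottom half completing busy_completion early: the completion is re-armed and the wait retried a bounded number of times. A compressed model of that retry, not the driver's exact code; the retry bound and struct layout below are assumptions:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_ipc {
	struct completion busy_completion;
	spinlock_t rx_lock;
	bool rx_completed;
};

static int my_wait_reply(struct my_ipc *ipc, int timeout_ms)
{
	int repeats_left = 128;	/* assumed bound, not the driver's value */

	if (!wait_for_completion_timeout(&ipc->busy_completion,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

again:
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left--) {
			/* Early wakeup by a notification: re-arm and wait again. */
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			wait_for_completion_killable(&ipc->busy_completion);
			goto again;
		}
		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}
	spin_unlock(&ipc->rx_lock);

	return 0;
}
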
429 static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply) in avs_ipc_msg_init() argument
431 lockdep_assert_held(&ipc->rx_lock); in avs_ipc_msg_init()
433 ipc->rx.header = 0; in avs_ipc_msg_init()
434 ipc->rx.size = reply ? reply->size : 0; in avs_ipc_msg_init()
435 ipc->rx_completed = false; in avs_ipc_msg_init()
437 reinit_completion(&ipc->done_completion); in avs_ipc_msg_init()
438 reinit_completion(&ipc->busy_completion); in avs_ipc_msg_init()
445 tx->header |= SKL_ADSP_HIPCI_BUSY; in avs_dsp_send_tx()
451 if (tx->size) in avs_dsp_send_tx()
452 memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size); in avs_dsp_send_tx()
453 snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32); in avs_dsp_send_tx()
454 snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX); in avs_dsp_send_tx()
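
avs_dsp_send_tx() sets the BUSY bit in the 64-bit header, copies any payload to the downlink window, and then splits the header across two 32-bit register writes: the upper half to SKL_ADSP_REG_HIPCIE and the lower half to SKL_ADSP_REG_HIPCI. A standalone illustration of that split; the helper is invented, while upper_32_bits()/lower_32_bits() are the standard kernel macros equivalent to the shifts used above:

#include <linux/kernel.h>
#include <linux/types.h>

/* Toy helper showing the 64-bit header split used above. */
static void example_split_header(u64 header, u32 *hipcie, u32 *hipci)
{
	*hipcie = upper_32_bits(header);	/* header >> 32 */
	*hipci = lower_32_bits(header);		/* header & UINT_MAX */
}
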
460 struct avs_ipc *ipc = adev->ipc; in avs_dsp_do_send_msg() local
463 if (!ipc->ready) in avs_dsp_do_send_msg()
464 return -EPERM; in avs_dsp_do_send_msg()
466 mutex_lock(&ipc->msg_mutex); in avs_dsp_do_send_msg()
468 spin_lock(&ipc->rx_lock); in avs_dsp_do_send_msg()
469 avs_ipc_msg_init(ipc, reply); in avs_dsp_do_send_msg()
471 spin_unlock(&ipc->rx_lock); in avs_dsp_do_send_msg()
473 ret = avs_ipc_wait_busy_completion(ipc, timeout); in avs_dsp_do_send_msg()
475 if (ret == -ETIMEDOUT) { in avs_dsp_do_send_msg()
484 ret = ipc->rx.rsp.status; in avs_dsp_do_send_msg()
486 * If IPC channel is blocked e.g.: due to ongoing recovery, in avs_dsp_do_send_msg()
487 * -EPERM error code is expected and thus it's not an actual error. in avs_dsp_do_send_msg()
491 if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED) in avs_dsp_do_send_msg()
492 dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n", in avs_dsp_do_send_msg()
493 name, request->glb.primary, request->glb.ext.val, ret); in avs_dsp_do_send_msg()
495 dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n", in avs_dsp_do_send_msg()
496 name, request->glb.primary, request->glb.ext.val, ret); in avs_dsp_do_send_msg()
499 reply->header = ipc->rx.header; in avs_dsp_do_send_msg()
500 reply->size = ipc->rx.size; in avs_dsp_do_send_msg()
501 if (reply->data && ipc->rx.size) in avs_dsp_do_send_msg()
502 memcpy(reply->data, ipc->rx.data, reply->size); in avs_dsp_do_send_msg()
506 mutex_unlock(&ipc->msg_mutex); in avs_dsp_do_send_msg()
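
avs_dsp_do_send_msg() serializes senders on msg_mutex, initializes the RX bookkeeping under rx_lock, sends the TX, waits for the reply, and on success copies the reply payload into the caller's buffer. Combined with the wrappers below, the usual call shape looks roughly like this hypothetical caller, which assumes the pointer-based signature implied by the forwarding call in avs_dsp_send_msg(); the request encoding itself is elided because the message-construction helpers are not part of this listing:

#include "avs.h"	/* driver-local declarations (assumed) */

/* Hypothetical caller, not part of the driver. */
static int example_query(struct avs_dev *adev)
{
	struct avs_ipc_msg request = {0};
	struct avs_ipc_msg reply = {0};

	/* request.glb.primary / request.glb.ext.val would normally be
	 * filled in by the driver's message-construction helpers. */
	return avs_dsp_send_msg(adev, &request, &reply, "example query");
}
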
516 trace_avs_d0ix("wake", wake_d0i0, request->header); in avs_dsp_send_msg_sequence()
527 trace_avs_d0ix("schedule", schedule_d0ix, request->header); in avs_dsp_send_msg_sequence()
547 return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, name); in avs_dsp_send_msg()
560 return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, in avs_dsp_send_pm_msg()
567 struct avs_ipc *ipc = adev->ipc; in avs_dsp_do_send_rom_msg() local
570 mutex_lock(&ipc->msg_mutex); in avs_dsp_do_send_rom_msg()
572 spin_lock(&ipc->rx_lock); in avs_dsp_do_send_rom_msg()
573 avs_ipc_msg_init(ipc, NULL); in avs_dsp_do_send_rom_msg()
579 spin_unlock(&ipc->rx_lock); in avs_dsp_do_send_rom_msg()
584 ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout)); in avs_dsp_do_send_rom_msg()
585 ret = ret ? 0 : -ETIMEDOUT; in avs_dsp_do_send_rom_msg()
588 dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n", in avs_dsp_do_send_rom_msg()
589 name, request->glb.primary, request->glb.ext.val, ret); in avs_dsp_do_send_rom_msg()
591 mutex_unlock(&ipc->msg_mutex); in avs_dsp_do_send_rom_msg()
604 return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms, name); in avs_dsp_send_rom_msg()
613 * to have a functional SW <-> FW communication. in avs_dsp_interrupt_control()
623 int avs_ipc_init(struct avs_ipc *ipc, struct device *dev) in avs_ipc_init() argument
625 ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL); in avs_ipc_init()
626 if (!ipc->rx.data) in avs_ipc_init()
627 return -ENOMEM; in avs_ipc_init()
629 ipc->dev = dev; in avs_ipc_init()
630 ipc->ready = false; in avs_ipc_init()
631 ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS; in avs_ipc_init()
632 INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work); in avs_ipc_init()
633 INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work); in avs_ipc_init()
634 init_completion(&ipc->done_completion); in avs_ipc_init()
635 init_completion(&ipc->busy_completion); in avs_ipc_init()
636 spin_lock_init(&ipc->rx_lock); in avs_ipc_init()
637 mutex_init(&ipc->msg_mutex); in avs_ipc_init()
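
avs_ipc_init() allocates the RX mailbox buffer with devm_kzalloc() and sets up the work items, completions and locks; avs_ipc_block() below is the counterpart that shuts the channel down. A sketch of probe-time usage; only avs_ipc_init()'s signature comes from the listing, and the surrounding allocation of struct avs_ipc is an assumption:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include "avs.h"	/* driver-local declarations (assumed) */

/* Illustrative probe-time setup, not the driver's actual probe code. */
static int example_ipc_setup(struct avs_dev *adev, struct device *dev)
{
	struct avs_ipc *ipc;
	int ret;

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;

	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->ipc = ipc;
	return 0;
}
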
642 void avs_ipc_block(struct avs_ipc *ipc) in avs_ipc_block() argument
644 ipc->ready = false; in avs_ipc_block()
645 cancel_work_sync(&ipc->recovery_work); in avs_ipc_block()
646 cancel_delayed_work_sync(&ipc->d0ix_work); in avs_ipc_block()
647 ipc->in_d0ix = false; in avs_ipc_block()
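
avs_ipc_block() drops ipc->ready and cancels both work items, so any request sent afterwards fails fast with -EPERM in avs_dsp_do_send_msg() instead of timing out. A hedged sketch of using it on a power-down path; the driver's actual call sites are not part of this listing:

#include "avs.h"	/* driver-local declarations (assumed) */

/* Illustrative only; where the driver really calls avs_ipc_block()
 * is not visible in this listing. */
static void example_power_down(struct avs_dev *adev)
{
	/* Stop accepting IPC traffic before the DSP goes away;
	 * late senders now get -EPERM rather than a timeout. */
	avs_ipc_block(adev->ipc);

	/* ...power down DSP cores / mask IPC interrupts here... */
}
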