1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021-2022 Intel Corporation
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 // Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8
9 #include <linux/devcoredump.h>
10 #include <linux/slab.h>
11 #include <sound/hdaudio_ext.h>
12 #include "avs.h"
13 #include "cldma.h"
14 #include "debug.h"
15 #include "messages.h"
16 #include "registers.h"
17
/*
 * Service a HIPC interrupt from the DSP: acknowledge a completed host
 * request and/or consume a new DSP-initiated message.
 *
 * Interrupts are masked on entry and re-enabled on exit so that status
 * bits are not re-raised while being processed.
 */
void avs_skl_ipc_interrupt(struct avs_dev *adev)
{
	const struct avs_spec *spec = adev->spec;
	u32 hipc_ack, hipc_rsp;

	/* Mask DONE and BUSY while the current status is handled. */
	snd_hdac_adsp_updatel(adev, spec->hipc->ctl_offset,
			      AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY, 0);

	hipc_ack = snd_hdac_adsp_readl(adev, spec->hipc->ack_offset);
	hipc_rsp = snd_hdac_adsp_readl(adev, spec->hipc->rsp_offset);

	/* DSP acked host's request. */
	if (hipc_ack & spec->hipc->ack_done_mask) {
		complete(&adev->ipc->done_completion);

		/* Tell DSP it has our attention. */
		snd_hdac_adsp_updatel(adev, spec->hipc->ack_offset, spec->hipc->ack_done_mask,
				      spec->hipc->ack_done_mask);
	}

	/* DSP sent new response to process */
	if (hipc_rsp & spec->hipc->rsp_busy_mask) {
		union avs_reply_msg msg;

		/* Message payload spans two registers: primary and extension. */
		msg.primary = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
		msg.ext.val = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

		avs_dsp_process_response(adev, msg.val);

		/* Tell DSP we accepted its message. */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT, SKL_ADSP_HIPCT_BUSY,
				      SKL_ADSP_HIPCT_BUSY);
	}

	/* Re-enable DONE and BUSY interrupts. */
	snd_hdac_adsp_updatel(adev, spec->hipc->ctl_offset,
			      AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY,
			      AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY);
}
56
avs_skl_dsp_interrupt(struct avs_dev * adev)57 static irqreturn_t avs_skl_dsp_interrupt(struct avs_dev *adev)
58 {
59 u32 adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
60 irqreturn_t ret = IRQ_NONE;
61
62 if (adspis == UINT_MAX)
63 return ret;
64
65 if (adspis & AVS_ADSP_ADSPIS_CLDMA) {
66 hda_cldma_interrupt(&code_loader);
67 ret = IRQ_HANDLED;
68 }
69
70 if (adspis & AVS_ADSP_ADSPIS_IPC) {
71 avs_skl_ipc_interrupt(adev);
72 ret = IRQ_HANDLED;
73 }
74
75 return ret;
76 }
77
78 static int __maybe_unused
avs_skl_enable_logs(struct avs_dev * adev,enum avs_log_enable enable,u32 aging_period,u32 fifo_full_period,unsigned long resource_mask,u32 * priorities)79 avs_skl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period,
80 u32 fifo_full_period, unsigned long resource_mask, u32 *priorities)
81 {
82 struct avs_skl_log_state_info *info;
83 u32 size, num_cores = adev->hw_cfg.dsp_cores;
84 int ret, i;
85
86 if (fls_long(resource_mask) > num_cores)
87 return -EINVAL;
88 size = struct_size(info, logs_core, num_cores);
89 info = kzalloc(size, GFP_KERNEL);
90 if (!info)
91 return -ENOMEM;
92
93 info->core_mask = resource_mask;
94 if (enable)
95 for_each_set_bit(i, &resource_mask, num_cores) {
96 info->logs_core[i].enable = enable;
97 info->logs_core[i].min_priority = *priorities++;
98 }
99 else
100 for_each_set_bit(i, &resource_mask, num_cores)
101 info->logs_core[i].enable = enable;
102
103 ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size);
104 kfree(info);
105 if (ret)
106 return AVS_IPC_RET(ret);
107
108 return 0;
109 }
110
/*
 * Per-core log buffers are laid out back to back in SRAM; the offset of
 * a given core's buffer is simply its index times the buffer size.
 */
int avs_skl_log_buffer_offset(struct avs_dev *adev, u32 core)
{
	return avs_log_buffer_size(adev) * core;
}
115
116 /* fw DbgLogWp registers */
117 #define FW_REGS_DBG_LOG_WP(core) (0x30 + 0x4 * core)
118
avs_skl_log_buffer_status(struct avs_dev * adev,union avs_notify_msg * msg)119 static int avs_skl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg)
120 {
121 void __iomem *buf;
122 u16 size, write, offset;
123
124 if (!avs_logging_fw(adev))
125 return 0;
126
127 size = avs_log_buffer_size(adev) / 2;
128 write = readl(avs_sram_addr(adev, AVS_FW_REGS_WINDOW) + FW_REGS_DBG_LOG_WP(msg->log.core));
129 /* determine buffer half */
130 offset = (write < size) ? size : 0;
131
132 /* Address is guaranteed to exist in SRAM2. */
133 buf = avs_log_buffer_addr(adev, msg->log.core) + offset;
134 avs_dump_fw_log_wakeup(adev, buf, size);
135
136 return 0;
137 }
138
avs_skl_coredump(struct avs_dev * adev,union avs_notify_msg * msg)139 static int avs_skl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
140 {
141 u8 *dump;
142
143 dump = vzalloc(AVS_FW_REGS_SIZE);
144 if (!dump)
145 return -ENOMEM;
146
147 memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE);
148 dev_coredumpv(adev->dev, dump, AVS_FW_REGS_SIZE, GFP_KERNEL);
149
150 return 0;
151 }
152
static bool avs_skl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake)
{
	/* D0ix power state transitions are not available on cAVS 1.5 hardware. */
	return false;
}
158
static int avs_skl_set_d0ix(struct avs_dev *adev, bool enable)
{
	/* Nothing to do: D0ix is not supported on cAVS 1.5 hardware. */
	return 0;
}
164
/*
 * DSP operations for cAVS 1.5 (Skylake-era) platforms. Firmware and
 * library loading goes through CLDMA; D0ix handlers are stubs as the
 * hardware does not support that power state.
 */
const struct avs_dsp_ops avs_skl_dsp_ops = {
	.power = avs_dsp_core_power,
	.reset = avs_dsp_core_reset,
	.stall = avs_dsp_core_stall,
	.dsp_interrupt = avs_skl_dsp_interrupt,
	.int_control = avs_dsp_interrupt_control,
	.load_basefw = avs_cldma_load_basefw,
	.load_lib = avs_cldma_load_library,
	.transfer_mods = avs_cldma_transfer_modules,
	.log_buffer_offset = avs_skl_log_buffer_offset,
	.log_buffer_status = avs_skl_log_buffer_status,
	.coredump = avs_skl_coredump,
	.d0ix_toggle = avs_skl_d0ix_toggle,
	.set_d0ix = avs_skl_set_d0ix,
	AVS_SET_ENABLE_LOGS_OP(skl)
};
181