// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/string_choices.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "registers.h"
#include "trace.h"

#define AVS_ADSPCS_INTERVAL_US		500
#define AVS_ADSPCS_TIMEOUT_US		50000
#define AVS_ADSPCS_DELAY_US		1000

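/*
 * Power the cores selected by @core_mask up or down by writing the SPA bits
 * of the ADSPCS register, then poll the corresponding CPA bits until the
 * hardware confirms the requested state.
 */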
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "power", power);

	mask = AVS_ADSPCS_SPA_MASK(core_mask);
	value = power ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
	/* Delay the polling to avoid false positives. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);

	mask = AVS_ADSPCS_CPA_MASK(core_mask);
	value = power ? mask : 0;

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
			core_mask, str_on_off(power), ret);

	return ret;
}

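/*
 * Move the cores selected by @core_mask into or out of reset by updating the
 * CRST bits of the ADSPCS register and polling until the change takes effect.
 */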
int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "reset", reset);

	mask = AVS_ADSPCS_CRST_MASK(core_mask);
	value = reset ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
			core_mask, reset ? "enter" : "exit", ret);

	return ret;
}

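/*
 * Stall or unstall the cores selected by @core_mask by updating the CSTALL
 * bits of the ADSPCS register and polling until the change takes effect.
 */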
int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "stall", stall);

	mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
	value = stall ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret) {
		dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
			core_mask, stall ? "" : "un", ret);
		return ret;
	}

	/* Give HW time to propagate the change. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
	return 0;
}

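/* Bring the selected cores fully online: power up, leave reset, unstall. */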
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_dsp_op(adev, power, core_mask, true);
	if (ret)
		return ret;

	ret = avs_dsp_op(adev, reset, core_mask, false);
	if (ret)
		return ret;

	return avs_dsp_op(adev, stall, core_mask, false);
}

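/* Take the selected cores fully offline: stall, enter reset, power down. */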
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
{
	/* No error checks to allow for complete DSP shutdown. */
	avs_dsp_op(adev, stall, core_mask, true);
	avs_dsp_op(adev, reset, core_mask, true);

	return avs_dsp_op(adev, power, core_mask, false);
}

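/*
 * Enable the selected cores and, for cores other than the main one, notify
 * the firmware of the power-up with avs_ipc_set_dx().
 */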
static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
{
	u32 mask;
	int ret;

	ret = avs_dsp_core_enable(adev, core_mask);
	if (ret < 0)
		return ret;

	mask = core_mask & ~AVS_MAIN_CORE_MASK;
	if (!mask)
		/*
		 * Without the main core the firmware is dead anyway,
		 * so setting D0 for it is futile.
		 */
		return 0;

	ret = avs_ipc_set_dx(adev, mask, true);
	return AVS_IPC_RET(ret);
}

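/*
 * Notify the firmware with avs_ipc_set_dx() that the selected cores are going
 * down and then take them offline.
 */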
static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_ipc_set_dx(adev, core_mask, false);
	if (ret)
		return AVS_IPC_RET(ret);

	return avs_dsp_core_disable(adev, core_mask);
}

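/*
 * Reference-counted acquisition of a non-main core. The first user disables
 * d0ix and powers the core up; the main core needs no handling here.
 */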
static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]++;
	if (adev->core_refs[core_id] == 1) {
		/*
		 * For the DSP to achieve d0ix, no cores other than the main
		 * core may be running. A deliberate SET_D0IX IPC failure is
		 * permitted; the d0ix power state simply will not be
		 * attempted anymore.
		 */
		ret = avs_dsp_disable_d0ix(adev);
		if (ret && ret != -AVS_EIPC)
			goto err_disable_d0ix;

		ret = avs_dsp_enable(adev, mask);
		if (ret)
			goto err_enable_dsp;
	}

	return 0;

err_enable_dsp:
	avs_dsp_enable_d0ix(adev);
err_disable_d0ix:
	adev->core_refs[core_id]--;
err:
	dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
	return ret;
}

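/*
 * Reference-counted release of a non-main core. The last user powers the core
 * down and re-enables d0ix, matching avs_dsp_get_core().
 */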
static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]--;
	if (!adev->core_refs[core_id]) {
		ret = avs_dsp_disable(adev, mask);
		if (ret)
			goto err;

		/* Match disable_d0ix in avs_dsp_get_core(). */
		avs_dsp_enable_d0ix(adev);
	}

	return 0;
err:
	dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
	return ret;
}

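/*
 * Instantiate a module on the DSP: allocate an instance id, take a reference
 * on the target core, load the module code if this is its first instance and
 * ask the firmware to initialize the instance with avs_ipc_init_instance().
 */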
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
			u8 core_id, u8 domain, void *param, u32 param_size,
			u8 *instance_id)
{
	struct avs_module_entry mentry;
	bool was_loaded = false;
	int ret, id;

	id = avs_module_id_alloc(adev, module_id);
	if (id < 0)
		return id;

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	if (ret)
		goto err_mod_entry;

	ret = avs_dsp_get_core(adev, core_id);
	if (ret)
		goto err_mod_entry;

	/* Load code into memory if this is the first instance. */
	if (!id && !avs_module_entry_is_loaded(&mentry)) {
		ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
		if (ret) {
			dev_err(adev->dev, "load modules failed: %d\n", ret);
			goto err_mod_entry;
		}
		was_loaded = true;
	}

	ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
				    core_id, domain, param, param_size);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto err_ipc;
	}

	*instance_id = id;
	return 0;

err_ipc:
	if (was_loaded)
		avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
	avs_dsp_put_core(adev, core_id);
err_mod_entry:
	avs_module_id_free(adev, module_id, id);
	return ret;
}

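/*
 * Tear down a module instance. Instances not owned by any pipeline are
 * deleted over IPC, the instance id is freed, the code of loadable modules is
 * unloaded once no instances remain and the core reference is dropped.
 */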
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
			   u8 ppl_instance_id, u8 core_id)
{
	struct avs_module_entry mentry;
	int ret;

	/* Modules not owned by any pipeline need to be freed explicitly. */
	if (ppl_instance_id == INVALID_PIPELINE_ID)
		avs_ipc_delete_instance(adev, module_id, instance_id);

	avs_module_id_free(adev, module_id, instance_id);

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	/* Unload occupied memory if this was the last instance. */
	if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
		if (avs_is_module_ida_empty(adev, module_id)) {
			ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
			if (ret)
				dev_err(adev->dev, "unload modules failed: %d\n", ret);
		}
	}

	avs_dsp_put_core(adev, core_id);
}

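/*
 * Allocate a pipeline instance id within the limit reported by the firmware
 * and request the pipeline's creation over IPC.
 */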
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
			    bool lp, u16 attributes, u8 *instance_id)
{
	struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
	int ret, id;

	id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
	if (ret) {
		ida_free(&adev->ppl_ida, id);
		return AVS_IPC_RET(ret);
	}

	*instance_id = id;
	return 0;
}

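/*
 * Request pipeline deletion over IPC and release its instance id. The id is
 * freed regardless of the IPC outcome.
 */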
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
{
	int ret;

	ret = avs_ipc_delete_pipeline(adev, instance_id);
	if (ret)
		ret = AVS_IPC_RET(ret);

	ida_free(&adev->ppl_ida, instance_id);
	return ret;
}