1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021 Intel Corporation
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 
9 #include <linux/acpi.h>
10 #include <acpi/nhlt.h>
11 #include <sound/pcm_params.h>
12 #include <sound/soc.h>
13 #include "avs.h"
14 #include "control.h"
15 #include "path.h"
16 #include "topology.h"
17 
18 /* Must be called with adev->comp_list_mutex held. */
19 static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
21 {
22 	struct avs_soc_component *acomp;
23 
24 	list_for_each_entry(acomp, &adev->comp_list, node)
25 		if (!strcmp(acomp->tplg->name, name))
26 			return acomp->tplg;
27 	return NULL;
28 }
29 
30 static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
32 {
33 	struct avs_path_module *mod;
34 
35 	list_for_each_entry(mod, &ppl->mod_list, node)
36 		if (mod->template->id == template_id)
37 			return mod;
38 	return NULL;
39 }
40 
41 static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
43 {
44 	struct avs_path_pipeline *ppl;
45 
46 	list_for_each_entry(ppl, &path->ppl_list, node)
47 		if (ppl->template->id == template_id)
48 			return ppl;
49 	return NULL;
50 }
51 
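/*
 * Find the path instance, if any, already instantiated from the given path
 * template of the named topology. Only one variant of a path template may
 * exist at a time, so a match uniquely identifies the path.
 */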
52 static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
54 {
55 	struct avs_tplg_path_template *pos, *template = NULL;
56 	struct avs_tplg *tplg;
57 	struct avs_path *path;
58 
59 	tplg = avs_path_find_tplg(adev, name);
60 	if (!tplg)
61 		return NULL;
62 
63 	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
64 		if (pos->id == template_id) {
65 			template = pos;
66 			break;
67 		}
68 	}
69 	if (!template)
70 		return NULL;
71 
72 	spin_lock(&adev->path_list_lock);
73 	/* Only one variant of given path template may be instantiated at a time. */
74 	list_for_each_entry(path, &adev->path_list, node) {
75 		if (path->template->owner == template) {
76 			spin_unlock(&adev->path_list_lock);
77 			return path;
78 		}
79 	}
80 
81 	spin_unlock(&adev->path_list_lock);
82 	return NULL;
83 }
84 
static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
86 			       struct avs_audio_format *fmt)
87 {
88 	return (params_rate(params) == fmt->sampling_freq &&
89 		params_channels(params) == fmt->num_channels &&
90 		params_physical_width(params) == fmt->bit_depth &&
91 		snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
92 }
93 
94 static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
96 		      struct avs_tplg_path_template *template,
97 		      struct snd_pcm_hw_params *fe_params,
98 		      struct snd_pcm_hw_params *be_params)
99 {
100 	struct avs_tplg_path *variant;
101 
	list_for_each_entry(variant, &template->path_list, node) {
		if (!variant->fe_fmt || !variant->be_fmt)
			continue;

		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

		if (avs_test_hw_params(fe_params, variant->fe_fmt) &&
		    avs_test_hw_params(be_params, variant->be_fmt))
			return variant;
	}
114 
115 	return NULL;
116 }
117 
118 static struct acpi_nhlt_config *
119 avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t);
120 
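/*
 * Walk all variants of the path template and, for every link copier (or WHM)
 * module with a valid NHLT blob, record the variant's front-end rate, channel
 * count and bit depth. The gathered values populate the provided hw-constraint
 * lists. Returns the number of entries found; when zero, the lists are left
 * untouched and the scratch buffers are freed.
 */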
int avs_path_set_constraint(struct avs_dev *adev, struct avs_tplg_path_template *template,
122 			    struct snd_pcm_hw_constraint_list *rate_list,
123 			    struct snd_pcm_hw_constraint_list *channels_list,
124 			    struct snd_pcm_hw_constraint_list *sample_bits_list)
125 {
126 	struct avs_tplg_path *path_template;
127 	unsigned int *rlist, *clist, *slist;
128 	size_t i;
129 
130 	i = 0;
131 	list_for_each_entry(path_template, &template->path_list, node)
132 		i++;
133 
	rlist = kcalloc(i, sizeof(*rlist), GFP_KERNEL);
	clist = kcalloc(i, sizeof(*clist), GFP_KERNEL);
	slist = kcalloc(i, sizeof(*slist), GFP_KERNEL);
	if (!rlist || !clist || !slist) {
		kfree(rlist);
		kfree(clist);
		kfree(slist);
		return -ENOMEM;
	}

138 	i = 0;
139 	list_for_each_entry(path_template, &template->path_list, node) {
140 		struct avs_tplg_pipeline *pipeline_template;
141 
142 		list_for_each_entry(pipeline_template, &path_template->ppl_list, node) {
143 			struct avs_tplg_module *module_template;
144 
145 			list_for_each_entry(module_template, &pipeline_template->mod_list, node) {
146 				const guid_t *type = &module_template->cfg_ext->type;
147 				struct acpi_nhlt_config *blob;
148 
149 				if (!guid_equal(type, &AVS_COPIER_MOD_UUID) &&
150 				    !guid_equal(type, &AVS_WOVHOSTM_MOD_UUID))
151 					continue;
152 
153 				switch (module_template->cfg_ext->copier.dma_type) {
154 				case AVS_DMA_DMIC_LINK_INPUT:
155 				case AVS_DMA_I2S_LINK_OUTPUT:
156 				case AVS_DMA_I2S_LINK_INPUT:
157 					break;
158 				default:
159 					continue;
160 				}
161 
162 				blob = avs_nhlt_config_or_default(adev, module_template);
163 				if (IS_ERR(blob))
164 					continue;
165 
166 				rlist[i] = path_template->fe_fmt->sampling_freq;
167 				clist[i] = path_template->fe_fmt->num_channels;
168 				slist[i] = path_template->fe_fmt->bit_depth;
169 				i++;
170 			}
171 		}
172 	}
173 
174 	if (i) {
175 		rate_list->count = i;
176 		rate_list->list = rlist;
177 		channels_list->count = i;
178 		channels_list->list = clist;
179 		sample_bits_list->count = i;
180 		sample_bits_list->list = slist;
181 	} else {
182 		kfree(rlist);
183 		kfree(clist);
184 		kfree(slist);
185 	}
186 
187 	return i;
188 }
189 
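/*
 * Compose the gateway node id for a copier: the DMA type from the topology
 * plus a virtual index that is either static (I2S/DMIC), the stream's DMA id
 * (HDA host) or a combination of both (HDA link).
 */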
static void avs_init_node_id(union avs_connector_node_id *node_id,
191 			     struct avs_tplg_modcfg_ext *te, u32 dma_id)
192 {
193 	node_id->val = 0;
194 	node_id->dma_type = te->copier.dma_type;
195 
196 	switch (node_id->dma_type) {
197 	case AVS_DMA_DMIC_LINK_INPUT:
198 	case AVS_DMA_I2S_LINK_OUTPUT:
199 	case AVS_DMA_I2S_LINK_INPUT:
200 		/* Gateway's virtual index is statically assigned in the topology. */
201 		node_id->vindex = te->copier.vindex.val;
202 		break;
203 
204 	case AVS_DMA_HDA_HOST_OUTPUT:
205 	case AVS_DMA_HDA_HOST_INPUT:
206 		/* Gateway's virtual index is dynamically assigned with DMA ID */
207 		node_id->vindex = dma_id;
208 		break;
209 
210 	case AVS_DMA_HDA_LINK_OUTPUT:
211 	case AVS_DMA_HDA_LINK_INPUT:
212 		node_id->vindex = te->copier.vindex.val | dma_id;
213 		break;
214 
215 	default:
216 		*node_id = INVALID_NODE_ID;
217 		break;
218 	}
219 }
220 
221 /* Every BLOB contains at least gateway attributes. */
222 static struct acpi_nhlt_config *default_blob = (struct acpi_nhlt_config *)&(u32[2]) {4};
223 
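/*
 * For link copiers, look up the NHLT endpoint blob matching the copier's bus,
 * direction and audio format. Host-side and other gateways need no hardware
 * configuration and get the default, attributes-only blob instead.
 */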
224 static struct acpi_nhlt_config *
avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t)
226 {
227 	struct acpi_nhlt_format_config *fmtcfg;
228 	struct avs_tplg_modcfg_ext *te;
229 	struct avs_audio_format *fmt;
230 	int link_type, dev_type;
231 	int bus_id, dir;
232 
233 	te = t->cfg_ext;
234 
235 	switch (te->copier.dma_type) {
236 	case AVS_DMA_I2S_LINK_OUTPUT:
237 		link_type = ACPI_NHLT_LINKTYPE_SSP;
238 		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
239 		bus_id = te->copier.vindex.i2s.instance;
240 		dir = SNDRV_PCM_STREAM_PLAYBACK;
241 		fmt = te->copier.out_fmt;
242 		break;
243 
244 	case AVS_DMA_I2S_LINK_INPUT:
245 		link_type = ACPI_NHLT_LINKTYPE_SSP;
246 		dev_type = ACPI_NHLT_DEVICETYPE_CODEC;
247 		bus_id = te->copier.vindex.i2s.instance;
248 		dir = SNDRV_PCM_STREAM_CAPTURE;
249 		fmt = t->in_fmt;
250 		break;
251 
252 	case AVS_DMA_DMIC_LINK_INPUT:
253 		link_type = ACPI_NHLT_LINKTYPE_PDM;
254 		dev_type = -1; /* ignored */
255 		bus_id = 0;
256 		dir = SNDRV_PCM_STREAM_CAPTURE;
257 		fmt = t->in_fmt;
258 		break;
259 
260 	default:
261 		return default_blob;
262 	}
263 
264 	/* Override format selection if necessary. */
265 	if (te->copier.blob_fmt)
266 		fmt = te->copier.blob_fmt;
267 
268 	fmtcfg = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
269 				       fmt->num_channels, fmt->sampling_freq, fmt->valid_bit_depth,
270 				       fmt->bit_depth);
271 	if (!fmtcfg) {
272 		dev_warn(adev->dev, "Endpoint format configuration not found.\n");
273 		return ERR_PTR(-ENOENT);
274 	}
275 
276 	if (fmtcfg->config.capabilities_size < default_blob->capabilities_size)
277 		return ERR_PTR(-ETOOSMALL);
278 	/* The firmware expects the payload to be DWORD-aligned. */
279 	if (fmtcfg->config.capabilities_size % sizeof(u32))
280 		return ERR_PTR(-EINVAL);
281 
282 	return &fmtcfg->config;
283 }
284 
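/*
 * Append the NHLT blob to the copier's gateway configuration, updating
 * *cfg_size accordingly. Fails with -E2BIG if the payload would exceed the
 * IPC mailbox.
 */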
static int avs_fill_gtw_config(struct avs_dev *adev, struct avs_copier_gtw_cfg *gtw,
286 			       struct avs_tplg_module *t, size_t *cfg_size)
287 {
288 	struct acpi_nhlt_config *blob;
289 	size_t gtw_size;
290 
291 	blob = avs_nhlt_config_or_default(adev, t);
292 	if (IS_ERR(blob))
293 		return PTR_ERR(blob);
294 
295 	gtw_size = blob->capabilities_size;
296 	if (*cfg_size + gtw_size > AVS_MAILBOX_SIZE)
297 		return -E2BIG;
298 
299 	gtw->config_length = gtw_size / sizeof(u32);
300 	memcpy(gtw->config.blob, blob->capabilities, blob->capabilities_size);
301 	*cfg_size += gtw_size;
302 
303 	return 0;
304 }
305 
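/*
 * The copier's IPC payload is variable-length: base config and output format
 * followed by the gateway configuration with the NHLT blob appended, which is
 * why cfg_size starts at the offset of gtw_cfg.config.
 */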
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
307 {
308 	struct avs_tplg_module *t = mod->template;
309 	struct avs_tplg_modcfg_ext *te;
310 	struct avs_copier_cfg *cfg;
311 	size_t cfg_size;
312 	u32 dma_id;
313 	int ret;
314 
315 	te = t->cfg_ext;
316 	cfg = adev->modcfg_buf;
317 	dma_id = mod->owner->owner->dma_id;
318 	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config);
319 
320 	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, &cfg_size);
321 	if (ret)
322 		return ret;
323 
324 	cfg->base.cpc = t->cfg_base->cpc;
325 	cfg->base.ibs = t->cfg_base->ibs;
326 	cfg->base.obs = t->cfg_base->obs;
327 	cfg->base.is_pages = t->cfg_base->is_pages;
328 	cfg->base.audio_fmt = *t->in_fmt;
329 	cfg->out_fmt = *te->copier.out_fmt;
330 	cfg->feature_mask = te->copier.feature_mask;
331 	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
332 	cfg->gtw_cfg.dma_buffer_size = te->copier.dma_buffer_size;
333 	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;
334 
335 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
336 				  t->domain, cfg, cfg_size, &mod->instance_id);
337 	return ret;
338 }
339 
static int avs_whm_create(struct avs_dev *adev, struct avs_path_module *mod)
341 {
342 	struct avs_tplg_module *t = mod->template;
343 	struct avs_tplg_modcfg_ext *te;
344 	struct avs_whm_cfg *cfg;
345 	size_t cfg_size;
346 	u32 dma_id;
347 	int ret;
348 
349 	te = t->cfg_ext;
350 	cfg = adev->modcfg_buf;
351 	dma_id = mod->owner->owner->dma_id;
352 	cfg_size = offsetof(struct avs_whm_cfg, gtw_cfg.config);
353 
354 	ret = avs_fill_gtw_config(adev, &cfg->gtw_cfg, t, &cfg_size);
355 	if (ret)
356 		return ret;
357 
358 	cfg->base.cpc = t->cfg_base->cpc;
359 	cfg->base.ibs = t->cfg_base->ibs;
360 	cfg->base.obs = t->cfg_base->obs;
361 	cfg->base.is_pages = t->cfg_base->is_pages;
362 	cfg->base.audio_fmt = *t->in_fmt;
363 	cfg->ref_fmt = *te->whm.ref_fmt;
364 	cfg->out_fmt = *te->whm.out_fmt;
365 	cfg->wake_tick_period = te->whm.wake_tick_period;
366 	avs_init_node_id(&cfg->gtw_cfg.node_id, te, dma_id);
367 	cfg->gtw_cfg.dma_buffer_size = te->whm.dma_buffer_size;
368 	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;
369 
370 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
371 				  t->domain, cfg, cfg_size, &mod->instance_id);
372 	return ret;
373 }
374 
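/*
 * Find the mixer control attached to the module by scanning kcontrols of the
 * DAPM widget representing the owning path template, matching on the topology
 * control id and the control-name substring.
 */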
static struct soc_mixer_control *avs_get_module_control(struct avs_path_module *mod,
376 							const char *name)
377 {
378 	struct avs_tplg_module *t = mod->template;
379 	struct avs_tplg_path_template *path_tmpl;
380 	struct snd_soc_dapm_widget *w;
381 	int i;
382 
383 	path_tmpl = t->owner->owner->owner;
384 	w = path_tmpl->w;
385 
386 	for (i = 0; i < w->num_kcontrols; i++) {
387 		struct avs_control_data *ctl_data;
388 		struct soc_mixer_control *mc;
389 
390 		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
391 		ctl_data = (struct avs_control_data *)mc->dobj.private;
392 		if (ctl_data->id == t->ctl_id && strstr(w->kcontrols[i]->id.name, name))
393 			return mc;
394 	}
395 
396 	return NULL;
397 }
398 
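/*
 * Program target volumes for the peakvol module. With a per-channel control,
 * each channel gets its own entry; otherwise a single entry addressed to
 * AVS_ALL_CHANNELS_MASK is sent. When @input is NULL, the values cached in
 * the control's private data are used.
 */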
int avs_peakvol_set_volume(struct avs_dev *adev, struct avs_path_module *mod,
400 			   struct soc_mixer_control *mc, long *input)
401 {
402 	struct avs_volume_cfg vols[SND_SOC_TPLG_MAX_CHAN] = {{0}};
403 	struct avs_control_data *ctl_data;
404 	struct avs_tplg_module *t;
405 	int ret, i;
406 
407 	ctl_data = mc->dobj.private;
408 	t = mod->template;
409 	if (!input)
410 		input = ctl_data->values;
411 
412 	if (mc->num_channels) {
413 		for (i = 0; i < mc->num_channels; i++) {
414 			vols[i].channel_id = i;
415 			vols[i].target_volume = input[i];
416 			vols[i].curve_type = t->cfg_ext->peakvol.curve_type;
417 			vols[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
418 		}
419 
420 		ret = avs_ipc_peakvol_set_volumes(adev, mod->module_id, mod->instance_id, vols,
421 						  mc->num_channels);
422 		return AVS_IPC_RET(ret);
423 	}
424 
	/* Target all channels if no individual channel is selected. */
426 	vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
427 	vols[0].target_volume = input[0];
428 	vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
429 	vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
430 
431 	ret = avs_ipc_peakvol_set_volume(adev, mod->module_id, mod->instance_id, &vols[0]);
432 	return AVS_IPC_RET(ret);
433 }
434 
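/* Mute counterpart of avs_peakvol_set_volume() - same channel-selection rules apply. */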
int avs_peakvol_set_mute(struct avs_dev *adev, struct avs_path_module *mod,
436 			 struct soc_mixer_control *mc, long *input)
437 {
438 	struct avs_mute_cfg mutes[SND_SOC_TPLG_MAX_CHAN] = {{0}};
439 	struct avs_control_data *ctl_data;
440 	struct avs_tplg_module *t;
441 	int ret, i;
442 
443 	ctl_data = mc->dobj.private;
444 	t = mod->template;
445 	if (!input)
446 		input = ctl_data->values;
447 
448 	if (mc->num_channels) {
449 		for (i = 0; i < mc->num_channels; i++) {
450 			mutes[i].channel_id = i;
451 			mutes[i].mute = !input[i];
452 			mutes[i].curve_type = t->cfg_ext->peakvol.curve_type;
453 			mutes[i].curve_duration = t->cfg_ext->peakvol.curve_duration;
454 		}
455 
456 		ret = avs_ipc_peakvol_set_mutes(adev, mod->module_id, mod->instance_id, mutes,
457 						mc->num_channels);
458 		return AVS_IPC_RET(ret);
459 	}
460 
	/* Target all channels if no individual channel is selected. */
462 	mutes[0].channel_id = AVS_ALL_CHANNELS_MASK;
463 	mutes[0].mute = !input[0];
464 	mutes[0].curve_type = t->cfg_ext->peakvol.curve_type;
465 	mutes[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
466 
467 	ret = avs_ipc_peakvol_set_mute(adev, mod->module_id, mod->instance_id, &mutes[0]);
468 	return AVS_IPC_RET(ret);
469 }
470 
static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
472 {
473 	struct avs_tplg_module *t = mod->template;
474 	struct soc_mixer_control *mc;
475 	struct avs_peakvol_cfg *cfg;
476 	size_t cfg_size;
477 	int ret;
478 
479 	cfg_size = struct_size(cfg, vols, 1);
480 	if (cfg_size > AVS_MAILBOX_SIZE)
481 		return -EINVAL;
482 
483 	cfg = adev->modcfg_buf;
484 	memset(cfg, 0, cfg_size);
485 	cfg->base.cpc = t->cfg_base->cpc;
486 	cfg->base.ibs = t->cfg_base->ibs;
487 	cfg->base.obs = t->cfg_base->obs;
488 	cfg->base.is_pages = t->cfg_base->is_pages;
489 	cfg->base.audio_fmt = *t->in_fmt;
490 	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
491 	cfg->vols[0].target_volume = S32_MAX;
492 	cfg->vols[0].curve_type = t->cfg_ext->peakvol.curve_type;
493 	cfg->vols[0].curve_duration = t->cfg_ext->peakvol.curve_duration;
494 
495 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
496 				  t->domain, cfg, cfg_size, &mod->instance_id);
497 	if (ret)
498 		return ret;
499 
500 	/* Now configure both VOLUME and MUTE parameters. */
501 	mc = avs_get_module_control(mod, "Volume");
502 	if (mc) {
503 		ret = avs_peakvol_set_volume(adev, mod, mc, NULL);
504 		if (ret)
505 			return ret;
506 	}
507 
508 	mc = avs_get_module_control(mod, "Switch");
509 	if (mc)
510 		return avs_peakvol_set_mute(adev, mod, mc, NULL);
511 	return 0;
512 }
513 
static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
515 {
516 	struct avs_tplg_module *t = mod->template;
517 	struct avs_updown_mixer_cfg cfg;
518 	int i;
519 
520 	cfg.base.cpc = t->cfg_base->cpc;
521 	cfg.base.ibs = t->cfg_base->ibs;
522 	cfg.base.obs = t->cfg_base->obs;
523 	cfg.base.is_pages = t->cfg_base->is_pages;
524 	cfg.base.audio_fmt = *t->in_fmt;
525 	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
526 	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
527 	for (i = 0; i < AVS_CHANNELS_MAX; i++)
528 		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
529 	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;
530 
531 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
532 				   t->core_id, t->domain, &cfg, sizeof(cfg),
533 				   &mod->instance_id);
534 }
535 
static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
537 {
538 	struct avs_tplg_module *t = mod->template;
539 	struct avs_src_cfg cfg;
540 
541 	cfg.base.cpc = t->cfg_base->cpc;
542 	cfg.base.ibs = t->cfg_base->ibs;
543 	cfg.base.obs = t->cfg_base->obs;
544 	cfg.base.is_pages = t->cfg_base->is_pages;
545 	cfg.base.audio_fmt = *t->in_fmt;
546 	cfg.out_freq = t->cfg_ext->src.out_freq;
547 
548 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
549 				   t->core_id, t->domain, &cfg, sizeof(cfg),
550 				   &mod->instance_id);
551 }
552 
static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
554 {
555 	struct avs_tplg_module *t = mod->template;
556 	struct avs_asrc_cfg cfg;
557 
558 	memset(&cfg, 0, sizeof(cfg));
559 	cfg.base.cpc = t->cfg_base->cpc;
560 	cfg.base.ibs = t->cfg_base->ibs;
561 	cfg.base.obs = t->cfg_base->obs;
562 	cfg.base.is_pages = t->cfg_base->is_pages;
563 	cfg.base.audio_fmt = *t->in_fmt;
564 	cfg.out_freq = t->cfg_ext->asrc.out_freq;
565 	cfg.mode = t->cfg_ext->asrc.mode;
566 	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;
567 
568 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
569 				   t->core_id, t->domain, &cfg, sizeof(cfg),
570 				   &mod->instance_id);
571 }
572 
static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
574 {
575 	struct avs_tplg_module *t = mod->template;
576 	struct avs_aec_cfg cfg;
577 
578 	cfg.base.cpc = t->cfg_base->cpc;
579 	cfg.base.ibs = t->cfg_base->ibs;
580 	cfg.base.obs = t->cfg_base->obs;
581 	cfg.base.is_pages = t->cfg_base->is_pages;
582 	cfg.base.audio_fmt = *t->in_fmt;
583 	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
584 	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
585 	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;
586 
587 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
588 				   t->core_id, t->domain, &cfg, sizeof(cfg),
589 				   &mod->instance_id);
590 }
591 
static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
593 {
594 	struct avs_tplg_module *t = mod->template;
595 	struct avs_mux_cfg cfg;
596 
597 	cfg.base.cpc = t->cfg_base->cpc;
598 	cfg.base.ibs = t->cfg_base->ibs;
599 	cfg.base.obs = t->cfg_base->obs;
600 	cfg.base.is_pages = t->cfg_base->is_pages;
601 	cfg.base.audio_fmt = *t->in_fmt;
602 	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
603 	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;
604 
605 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
606 				   t->core_id, t->domain, &cfg, sizeof(cfg),
607 				   &mod->instance_id);
608 }
609 
static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
611 {
612 	struct avs_tplg_module *t = mod->template;
613 	struct avs_wov_cfg cfg;
614 
615 	cfg.base.cpc = t->cfg_base->cpc;
616 	cfg.base.ibs = t->cfg_base->ibs;
617 	cfg.base.obs = t->cfg_base->obs;
618 	cfg.base.is_pages = t->cfg_base->is_pages;
619 	cfg.base.audio_fmt = *t->in_fmt;
620 	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;
621 
622 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
623 				   t->core_id, t->domain, &cfg, sizeof(cfg),
624 				   &mod->instance_id);
625 }
626 
static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
628 {
629 	struct avs_tplg_module *t = mod->template;
630 	struct avs_micsel_cfg cfg;
631 
632 	cfg.base.cpc = t->cfg_base->cpc;
633 	cfg.base.ibs = t->cfg_base->ibs;
634 	cfg.base.obs = t->cfg_base->obs;
635 	cfg.base.is_pages = t->cfg_base->is_pages;
636 	cfg.base.audio_fmt = *t->in_fmt;
637 	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;
638 
639 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
640 				   t->core_id, t->domain, &cfg, sizeof(cfg),
641 				   &mod->instance_id);
642 }
643 
static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
645 {
646 	struct avs_tplg_module *t = mod->template;
647 	struct avs_modcfg_base cfg;
648 
649 	cfg.cpc = t->cfg_base->cpc;
650 	cfg.ibs = t->cfg_base->ibs;
651 	cfg.obs = t->cfg_base->obs;
652 	cfg.is_pages = t->cfg_base->is_pages;
653 	cfg.audio_fmt = *t->in_fmt;
654 
655 	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
656 				   t->core_id, t->domain, &cfg, sizeof(cfg),
657 				   &mod->instance_id);
658 }
659 
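/*
 * Fallback for modules with no dedicated constructor: send the base config
 * followed by the per-pin formats described by the extended topology config.
 */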
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
661 {
662 	struct avs_tplg_module *t = mod->template;
663 	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
664 	struct avs_modcfg_ext *cfg;
665 	size_t cfg_size, num_pins;
666 	int ret, i;
667 
668 	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
669 	cfg_size = struct_size(cfg, pin_fmts, num_pins);
670 
671 	if (cfg_size > AVS_MAILBOX_SIZE)
672 		return -EINVAL;
673 
674 	cfg = adev->modcfg_buf;
675 	memset(cfg, 0, cfg_size);
676 	cfg->base.cpc = t->cfg_base->cpc;
677 	cfg->base.ibs = t->cfg_base->ibs;
678 	cfg->base.obs = t->cfg_base->obs;
679 	cfg->base.is_pages = t->cfg_base->is_pages;
680 	cfg->base.audio_fmt = *t->in_fmt;
681 	cfg->num_input_pins = tcfg->generic.num_input_pins;
682 	cfg->num_output_pins = tcfg->generic.num_output_pins;
683 
684 	/* configure pin formats */
685 	for (i = 0; i < num_pins; i++) {
686 		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
687 		struct avs_pin_format *pin = &cfg->pin_fmts[i];
688 
689 		pin->pin_index = tpin->pin_index;
690 		pin->iobs = tpin->iobs;
691 		pin->audio_fmt = *tpin->fmt;
692 	}
693 
694 	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
695 				  t->core_id, t->domain, cfg, cfg_size,
696 				  &mod->instance_id);
697 	return ret;
698 }
699 
static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
701 {
	dev_err(adev->dev, "Probe module can't be instantiated by topology\n");
703 	return -EINVAL;
704 }
705 
706 struct avs_module_create {
707 	guid_t *guid;
708 	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
709 };
710 
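/*
 * Constructors for the known module types. Types absent from this table are
 * handled by the generic avs_modext_create().
 */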
711 static struct avs_module_create avs_module_create[] = {
712 	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
713 	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
714 	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
715 	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
716 	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
717 	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
718 	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
719 	{ &AVS_MUX_MOD_UUID, avs_mux_create },
720 	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
721 	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
722 	{ &AVS_AEC_MOD_UUID, avs_aec_create },
723 	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
724 	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
725 	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
726 	{ &AVS_WOVHOSTM_MOD_UUID, avs_whm_create },
727 };
728 
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
730 {
731 	const guid_t *type = &mod->template->cfg_ext->type;
732 
733 	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
734 		if (guid_equal(type, avs_module_create[i].guid))
735 			return avs_module_create[i].create(adev, mod);
736 
737 	return avs_modext_create(adev, mod);
738 }
739 
static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
741 {
742 	struct avs_soc_component *acomp;
743 
744 	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);
745 
746 	u32 num_ids = mod->template->num_config_ids;
747 	u32 *ids = mod->template->config_ids;
748 
749 	for (int i = 0; i < num_ids; i++) {
750 		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
751 		size_t len = config->length;
752 		void *data = config->data;
753 		u32 param = config->param;
754 		int ret;
755 
756 		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
757 					       param, data, len);
758 		if (ret) {
759 			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
760 			return AVS_IPC_RET(ret);
761 		}
762 	}
763 
764 	return 0;
765 }
766 
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
768 {
769 	kfree(mod);
770 }
771 
772 static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
774 		       struct avs_path_pipeline *owner,
775 		       struct avs_tplg_module *template)
776 {
777 	struct avs_path_module *mod;
778 	int module_id, ret;
779 
780 	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
781 	if (module_id < 0)
782 		return ERR_PTR(module_id);
783 
784 	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
785 	if (!mod)
786 		return ERR_PTR(-ENOMEM);
787 
788 	mod->template = template;
789 	mod->module_id = module_id;
790 	mod->owner = owner;
791 	INIT_LIST_HEAD(&mod->node);
792 
793 	ret = avs_path_module_type_create(adev, mod);
794 	if (ret) {
795 		dev_err(adev->dev, "module-type create failed: %d\n", ret);
796 		kfree(mod);
797 		return ERR_PTR(ret);
798 	}
799 
800 	ret = avs_path_module_send_init_configs(adev, mod);
801 	if (ret) {
802 		kfree(mod);
803 		return ERR_PTR(ret);
804 	}
805 
806 	return mod;
807 }
808 
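/*
 * Resolve a topology binding into concrete module instances and pins. The
 * remote end may live in a different path, so it is located by topology name,
 * path-template id, pipeline id and module id.
 */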
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
810 {
811 	struct avs_path_module *this_mod, *target_mod;
812 	struct avs_path_pipeline *target_ppl;
813 	struct avs_path *target_path;
814 	struct avs_tplg_binding *t;
815 
816 	t = binding->template;
817 	this_mod = avs_path_find_module(binding->owner,
818 					t->mod_id);
819 	if (!this_mod) {
820 		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
821 		return -EINVAL;
822 	}
823 
	/* The target path may belong to a different topology; look it up by name. */
825 	target_path = avs_path_find_path(adev, t->target_tplg_name,
826 					 t->target_path_tmpl_id);
827 	if (!target_path) {
828 		dev_err(adev->dev, "target path %s:%d not found\n",
829 			t->target_tplg_name, t->target_path_tmpl_id);
830 		return -EINVAL;
831 	}
832 
833 	target_ppl = avs_path_find_pipeline(target_path,
834 					    t->target_ppl_id);
835 	if (!target_ppl) {
836 		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
837 		return -EINVAL;
838 	}
839 
840 	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
841 	if (!target_mod) {
842 		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
843 		return -EINVAL;
844 	}
845 
846 	if (t->is_sink) {
847 		binding->sink = this_mod;
848 		binding->sink_pin = t->mod_pin;
849 		binding->source = target_mod;
850 		binding->source_pin = t->target_mod_pin;
851 	} else {
852 		binding->sink = target_mod;
853 		binding->sink_pin = t->target_mod_pin;
854 		binding->source = this_mod;
855 		binding->source_pin = t->mod_pin;
856 	}
857 
858 	return 0;
859 }
860 
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
862 {
863 	kfree(binding);
864 }
865 
static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
867 							struct avs_path_pipeline *owner,
868 							struct avs_tplg_binding *t)
869 {
870 	struct avs_path_binding *binding;
871 
872 	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
873 	if (!binding)
874 		return ERR_PTR(-ENOMEM);
875 
876 	binding->template = t;
877 	binding->owner = owner;
878 	INIT_LIST_HEAD(&binding->node);
879 
880 	return binding;
881 }
882 
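/* Chain the pipeline's modules together by binding each one to its successor on the list. */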
static int avs_path_pipeline_arm(struct avs_dev *adev,
884 				 struct avs_path_pipeline *ppl)
885 {
886 	struct avs_path_module *mod;
887 
888 	list_for_each_entry(mod, &ppl->mod_list, node) {
889 		struct avs_path_module *source, *sink;
890 		int ret;
891 
		/*
		 * Whether this is the only module or simply the last one on
		 * the list, there is no next module to bind it to.
		 */
896 		if (mod == list_last_entry(&ppl->mod_list,
897 					   struct avs_path_module, node))
898 			break;
899 
900 		/* bind current module to next module on list */
901 		source = mod;
902 		sink = list_next_entry(mod, node);
903 
904 		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
905 				   sink->module_id, sink->instance_id, 0, 0);
906 		if (ret)
907 			return AVS_IPC_RET(ret);
908 	}
909 
910 	return 0;
911 }
912 
static void avs_path_pipeline_free(struct avs_dev *adev,
914 				   struct avs_path_pipeline *ppl)
915 {
916 	struct avs_path_binding *binding, *bsave;
917 	struct avs_path_module *mod, *save;
918 
919 	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
920 		list_del(&binding->node);
921 		avs_path_binding_free(adev, binding);
922 	}
923 
924 	avs_dsp_delete_pipeline(adev, ppl->instance_id);
925 
926 	/* Unload resources occupied by owned modules */
927 	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
928 		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
929 				      mod->owner->instance_id,
930 				      mod->template->core_id);
931 		avs_path_module_free(adev, mod);
932 	}
933 
934 	list_del(&ppl->node);
935 	kfree(ppl);
936 }
937 
938 static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
940 			 struct avs_tplg_pipeline *template)
941 {
942 	struct avs_path_pipeline *ppl;
943 	struct avs_tplg_pplcfg *cfg = template->cfg;
944 	struct avs_tplg_module *tmod;
945 	int ret, i;
946 
947 	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
948 	if (!ppl)
949 		return ERR_PTR(-ENOMEM);
950 
951 	ppl->template = template;
952 	ppl->owner = owner;
953 	INIT_LIST_HEAD(&ppl->binding_list);
954 	INIT_LIST_HEAD(&ppl->mod_list);
955 	INIT_LIST_HEAD(&ppl->node);
956 
957 	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
958 				      cfg->lp, cfg->attributes,
959 				      &ppl->instance_id);
960 	if (ret) {
961 		dev_err(adev->dev, "error creating pipeline %d\n", ret);
962 		kfree(ppl);
963 		return ERR_PTR(ret);
964 	}
965 
966 	list_for_each_entry(tmod, &template->mod_list, node) {
967 		struct avs_path_module *mod;
968 
969 		mod = avs_path_module_create(adev, ppl, tmod);
970 		if (IS_ERR(mod)) {
971 			ret = PTR_ERR(mod);
972 			dev_err(adev->dev, "error creating module %d\n", ret);
973 			goto init_err;
974 		}
975 
976 		list_add_tail(&mod->node, &ppl->mod_list);
977 	}
978 
979 	for (i = 0; i < template->num_bindings; i++) {
980 		struct avs_path_binding *binding;
981 
982 		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
983 		if (IS_ERR(binding)) {
984 			ret = PTR_ERR(binding);
985 			dev_err(adev->dev, "error creating binding %d\n", ret);
986 			goto init_err;
987 		}
988 
989 		list_add_tail(&binding->node, &ppl->binding_list);
990 	}
991 
992 	return ppl;
993 
994 init_err:
995 	avs_path_pipeline_free(adev, ppl);
996 	return ERR_PTR(ret);
997 }
998 
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
1000 			 struct avs_tplg_path *template, u32 dma_id)
1001 {
1002 	struct avs_tplg_pipeline *tppl;
1003 
1004 	path->owner = adev;
1005 	path->template = template;
1006 	path->dma_id = dma_id;
1007 	INIT_LIST_HEAD(&path->ppl_list);
1008 	INIT_LIST_HEAD(&path->node);
1009 
1010 	/* create all the pipelines */
1011 	list_for_each_entry(tppl, &template->ppl_list, node) {
1012 		struct avs_path_pipeline *ppl;
1013 
1014 		ppl = avs_path_pipeline_create(adev, path, tppl);
1015 		if (IS_ERR(ppl))
1016 			return PTR_ERR(ppl);
1017 
1018 		list_add_tail(&ppl->node, &path->ppl_list);
1019 	}
1020 
1021 	spin_lock(&adev->path_list_lock);
1022 	list_add_tail(&path->node, &adev->path_list);
1023 	spin_unlock(&adev->path_list_lock);
1024 
1025 	return 0;
1026 }
1027 
static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
1029 {
1030 	struct avs_path_pipeline *ppl;
1031 	struct avs_path_binding *binding;
1032 	int ret;
1033 
1034 	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all pipeline bindings before binding the internal
		 * modules - arming costs no IPCs, unlike the actual binding.
		 */
1039 		list_for_each_entry(binding, &ppl->binding_list, node) {
1040 			ret = avs_path_binding_arm(adev, binding);
1041 			if (ret < 0)
1042 				return ret;
1043 		}
1044 
1045 		ret = avs_path_pipeline_arm(adev, ppl);
1046 		if (ret < 0)
1047 			return ret;
1048 	}
1049 
1050 	return 0;
1051 }
1052 
static void avs_path_free_unlocked(struct avs_path *path)
1054 {
1055 	struct avs_path_pipeline *ppl, *save;
1056 
1057 	spin_lock(&path->owner->path_list_lock);
1058 	list_del(&path->node);
1059 	spin_unlock(&path->owner->path_list_lock);
1060 
1061 	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
1062 		avs_path_pipeline_free(path->owner, ppl);
1063 
1064 	kfree(path);
1065 }
1066 
static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
1068 						 struct avs_tplg_path *template)
1069 {
1070 	struct avs_path *path;
1071 	int ret;
1072 
1073 	path = kzalloc(sizeof(*path), GFP_KERNEL);
1074 	if (!path)
1075 		return ERR_PTR(-ENOMEM);
1076 
1077 	ret = avs_path_init(adev, path, template, dma_id);
1078 	if (ret < 0)
1079 		goto err;
1080 
1081 	ret = avs_path_arm(adev, path);
1082 	if (ret < 0)
1083 		goto err;
1084 
1085 	path->state = AVS_PPL_STATE_INVALID;
1086 	return path;
1087 err:
1088 	avs_path_free_unlocked(path);
1089 	return ERR_PTR(ret);
1090 }
1091 
void avs_path_free(struct avs_path *path)
1093 {
1094 	struct avs_dev *adev = path->owner;
1095 
1096 	mutex_lock(&adev->path_mutex);
1097 	avs_path_free_unlocked(path);
1098 	mutex_unlock(&adev->path_mutex);
1099 }
1100 
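/*
 * Instantiate the path variant matching both front-end and back-end hw_params.
 * Creation of the path and all of its pipelines, modules and bindings is
 * serialized with the device's path_mutex.
 */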
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
1102 				 struct avs_tplg_path_template *template,
1103 				 struct snd_pcm_hw_params *fe_params,
1104 				 struct snd_pcm_hw_params *be_params)
1105 {
1106 	struct avs_tplg_path *variant;
1107 	struct avs_path *path;
1108 
1109 	variant = avs_path_find_variant(adev, template, fe_params, be_params);
1110 	if (!variant) {
1111 		dev_err(adev->dev, "no matching variant found\n");
1112 		return ERR_PTR(-ENOENT);
1113 	}
1114 
1115 	/* Serialize path and its components creation. */
1116 	mutex_lock(&adev->path_mutex);
1117 	/* Satisfy needs of avs_path_find_tplg(). */
1118 	mutex_lock(&adev->comp_list_mutex);
1119 
1120 	path = avs_path_create_unlocked(adev, dma_id, variant);
1121 
1122 	mutex_unlock(&adev->comp_list_mutex);
1123 	mutex_unlock(&adev->path_mutex);
1124 
1125 	return path;
1126 }
1127 
static int avs_path_bind_prepare(struct avs_dev *adev,
1129 				 struct avs_path_binding *binding)
1130 {
1131 	const struct avs_audio_format *src_fmt, *sink_fmt;
1132 	struct avs_tplg_module *tsource = binding->source->template;
1133 	struct avs_path_module *source = binding->source;
1134 	int ret;
1135 
	/*
	 * Only copier modules about to be bound to an output pin other
	 * than 0 need preparation.
	 */
1140 	if (!binding->source_pin)
1141 		return 0;
1142 	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
1143 		return 0;
1144 
1145 	src_fmt = tsource->in_fmt;
1146 	sink_fmt = binding->sink->template->in_fmt;
1147 
1148 	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
1149 					     source->instance_id, binding->source_pin,
1150 					     src_fmt, sink_fmt);
1151 	if (ret) {
1152 		dev_err(adev->dev, "config copier failed: %d\n", ret);
1153 		return AVS_IPC_RET(ret);
1154 	}
1155 
1156 	return 0;
1157 }
1158 
int avs_path_bind(struct avs_path *path)
1160 {
1161 	struct avs_path_pipeline *ppl;
1162 	struct avs_dev *adev = path->owner;
1163 	int ret;
1164 
1165 	list_for_each_entry(ppl, &path->ppl_list, node) {
1166 		struct avs_path_binding *binding;
1167 
1168 		list_for_each_entry(binding, &ppl->binding_list, node) {
1169 			struct avs_path_module *source, *sink;
1170 
1171 			source = binding->source;
1172 			sink = binding->sink;
1173 
1174 			ret = avs_path_bind_prepare(adev, binding);
1175 			if (ret < 0)
1176 				return ret;
1177 
1178 			ret = avs_ipc_bind(adev, source->module_id,
1179 					   source->instance_id, sink->module_id,
1180 					   sink->instance_id, binding->sink_pin,
1181 					   binding->source_pin);
1182 			if (ret) {
1183 				dev_err(adev->dev, "bind path failed: %d\n", ret);
1184 				return AVS_IPC_RET(ret);
1185 			}
1186 		}
1187 	}
1188 
1189 	return 0;
1190 }
1191 
int avs_path_unbind(struct avs_path *path)
1193 {
1194 	struct avs_path_pipeline *ppl;
1195 	struct avs_dev *adev = path->owner;
1196 	int ret;
1197 
1198 	list_for_each_entry(ppl, &path->ppl_list, node) {
1199 		struct avs_path_binding *binding;
1200 
1201 		list_for_each_entry(binding, &ppl->binding_list, node) {
1202 			struct avs_path_module *source, *sink;
1203 
1204 			source = binding->source;
1205 			sink = binding->sink;
1206 
1207 			ret = avs_ipc_unbind(adev, source->module_id,
1208 					     source->instance_id, sink->module_id,
1209 					     sink->instance_id, binding->sink_pin,
1210 					     binding->source_pin);
1211 			if (ret) {
1212 				dev_err(adev->dev, "unbind path failed: %d\n", ret);
1213 				return AVS_IPC_RET(ret);
1214 			}
1215 		}
1216 	}
1217 
1218 	return 0;
1219 }
1220 
int avs_path_reset(struct avs_path *path)
1222 {
1223 	struct avs_path_pipeline *ppl;
1224 	struct avs_dev *adev = path->owner;
1225 	int ret;
1226 
1227 	if (path->state == AVS_PPL_STATE_RESET)
1228 		return 0;
1229 
1230 	list_for_each_entry(ppl, &path->ppl_list, node) {
1231 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1232 						 AVS_PPL_STATE_RESET);
1233 		if (ret) {
1234 			dev_err(adev->dev, "reset path failed: %d\n", ret);
1235 			path->state = AVS_PPL_STATE_INVALID;
1236 			return AVS_IPC_RET(ret);
1237 		}
1238 	}
1239 
1240 	path->state = AVS_PPL_STATE_RESET;
1241 	return 0;
1242 }
1243 
int avs_path_pause(struct avs_path *path)
1245 {
1246 	struct avs_path_pipeline *ppl;
1247 	struct avs_dev *adev = path->owner;
1248 	int ret;
1249 
1250 	if (path->state == AVS_PPL_STATE_PAUSED)
1251 		return 0;
1252 
1253 	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
1254 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1255 						 AVS_PPL_STATE_PAUSED);
1256 		if (ret) {
1257 			dev_err(adev->dev, "pause path failed: %d\n", ret);
1258 			path->state = AVS_PPL_STATE_INVALID;
1259 			return AVS_IPC_RET(ret);
1260 		}
1261 	}
1262 
1263 	path->state = AVS_PPL_STATE_PAUSED;
1264 	return 0;
1265 }
1266 
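/*
 * Switch to RUNNING only those pipelines whose topology trigger matches
 * @trigger. A path that is already running is a no-op for the AUTO trigger.
 */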
int avs_path_run(struct avs_path *path, int trigger)
1268 {
1269 	struct avs_path_pipeline *ppl;
1270 	struct avs_dev *adev = path->owner;
1271 	int ret;
1272 
1273 	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
1274 		return 0;
1275 
1276 	list_for_each_entry(ppl, &path->ppl_list, node) {
1277 		if (ppl->template->cfg->trigger != trigger)
1278 			continue;
1279 
1280 		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
1281 						 AVS_PPL_STATE_RUNNING);
1282 		if (ret) {
1283 			dev_err(adev->dev, "run path failed: %d\n", ret);
1284 			path->state = AVS_PPL_STATE_INVALID;
1285 			return AVS_IPC_RET(ret);
1286 		}
1287 	}
1288 
1289 	path->state = AVS_PPL_STATE_RUNNING;
1290 	return 0;
1291 }
1292