1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 // Copyright(c) 2023 Intel Corporation
3
4 /*
5 * SoundWire Intel ops for LunarLake
6 */
7
8 #include <linux/acpi.h>
9 #include <linux/cleanup.h>
10 #include <linux/device.h>
11 #include <linux/soundwire/sdw_registers.h>
12 #include <linux/soundwire/sdw.h>
13 #include <linux/soundwire/sdw_intel.h>
14 #include <sound/hdaudio.h>
15 #include <sound/hda-mlink.h>
16 #include <sound/hda-sdw-bpt.h>
17 #include <sound/hda_register.h>
18 #include <sound/pcm_params.h>
19 #include "cadence_master.h"
20 #include "bus.h"
21 #include "intel.h"
22
23 static int sdw_slave_bpt_stream_add(struct sdw_slave *slave, struct sdw_stream_runtime *stream)
24 {
25 struct sdw_stream_config sconfig = {0};
26 struct sdw_port_config pconfig = {0};
27 int ret;
28
29 /* arbitrary configuration */
30 sconfig.frame_rate = 16000;
31 sconfig.ch_count = 1;
32 sconfig.bps = 32; /* this is required for BPT/BRA */
33 sconfig.direction = SDW_DATA_DIR_RX;
34 sconfig.type = SDW_STREAM_BPT;
35
36 pconfig.num = 0;
37 pconfig.ch_mask = BIT(0);
38
39 ret = sdw_stream_add_slave(slave, &sconfig, &pconfig, 1, stream);
40 if (ret)
41 dev_err(&slave->dev, "%s: failed: %d\n", __func__, ret);
42
43 return ret;
44 }
45
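/*
 * Set up a BPT stream on this link: allocate the stream, add the peripheral
 * port, reserve PDI0 (TX, command) and PDI1 (RX, response), add the manager
 * ports, open the HDaudio DMA channels and pre-format the TX buffer for the
 * requested read or write message.
 */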
46 static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
47 struct sdw_bpt_msg *msg)
48 {
49 struct sdw_cdns *cdns = &sdw->cdns;
50 struct sdw_bus *bus = &cdns->bus;
51 struct sdw_master_prop *prop = &bus->prop;
52 struct sdw_stream_runtime *stream;
53 struct sdw_stream_config sconfig;
54 struct sdw_port_config *pconfig;
55 unsigned int pdi0_buffer_size;
56 unsigned int tx_dma_bandwidth;
57 unsigned int pdi1_buffer_size;
58 unsigned int rx_dma_bandwidth;
59 unsigned int data_per_frame;
60 unsigned int tx_total_bytes;
61 struct sdw_cdns_pdi *pdi0;
62 struct sdw_cdns_pdi *pdi1;
63 unsigned int num_frames;
64 int command;
65 int ret1;
66 int ret;
67 int dir;
68 int i;
69
70 stream = sdw_alloc_stream("BPT", SDW_STREAM_BPT);
71 if (!stream)
72 return -ENOMEM;
73
74 cdns->bus.bpt_stream = stream;
75
76 ret = sdw_slave_bpt_stream_add(slave, stream);
77 if (ret < 0)
78 goto release_stream;
79
80 /* handle PDI0 first */
81 dir = SDW_DATA_DIR_TX;
82
83 pdi0 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 0);
84 if (!pdi0) {
85 dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi0 failed\n", __func__);
86 ret = -EINVAL;
87 goto remove_slave;
88 }
89
90 sdw_cdns_config_stream(cdns, 1, dir, pdi0);
91
92 /* handle PDI1 */
93 dir = SDW_DATA_DIR_RX;
94
95 pdi1 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 1);
96 if (!pdi1) {
97 dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi1 failed\n", __func__);
98 ret = -EINVAL;
99 goto remove_slave;
100 }
101
102 sdw_cdns_config_stream(cdns, 1, dir, pdi1);
103
104 /*
105 * the port config direction, number of channels and frame
106 * rate are totally arbitrary
107 */
108 sconfig.direction = dir;
109 sconfig.ch_count = 1;
110 sconfig.frame_rate = 16000;
111 sconfig.type = SDW_STREAM_BPT;
112 sconfig.bps = 32; /* this is required for BPT/BRA */
113
114 /* Port configuration */
115 pconfig = kcalloc(2, sizeof(*pconfig), GFP_KERNEL);
116 if (!pconfig) {
117 ret = -ENOMEM;
118 goto remove_slave;
119 }
120
121 for (i = 0; i < 2 /* num_pdi */; i++) {
122 pconfig[i].num = i;
123 pconfig[i].ch_mask = 1;
124 }
125
126 ret = sdw_stream_add_master(&cdns->bus, &sconfig, pconfig, 2, stream);
127 kfree(pconfig);
128
129 if (ret < 0) {
130 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
131 goto remove_slave;
132 }
133
134 ret = sdw_prepare_stream(cdns->bus.bpt_stream);
135 if (ret < 0)
136 goto remove_master;
137
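/* BPT command encoding used below: 0 for a bulk write, 1 for a bulk read */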
138 command = (msg->flags & SDW_MSG_FLAG_WRITE) ? 0 : 1;
139
140 ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, cdns->bus.params.col,
141 msg->len, SDW_BPT_MSG_MAX_BYTES, &data_per_frame,
142 &pdi0_buffer_size, &pdi1_buffer_size, &num_frames);
143 if (ret < 0)
144 goto deprepare_stream;
145
146 sdw->bpt_ctx.pdi0_buffer_size = pdi0_buffer_size;
147 sdw->bpt_ctx.pdi1_buffer_size = pdi1_buffer_size;
148 sdw->bpt_ctx.num_frames = num_frames;
149 sdw->bpt_ctx.data_per_frame = data_per_frame;
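/*
 * DMA bandwidth in bits per second: the whole PDI buffer has to be
 * moved within 'num_frames' frames at the bus default frame rate.
 */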
150 tx_dma_bandwidth = div_u64((u64)pdi0_buffer_size * 8 * (u64)prop->default_frame_rate,
151 num_frames);
152 rx_dma_bandwidth = div_u64((u64)pdi1_buffer_size * 8 * (u64)prop->default_frame_rate,
153 num_frames);
154
155 dev_dbg(cdns->dev, "Message len %d transferred in %d frames (%d per frame)\n",
156 msg->len, num_frames, data_per_frame);
157 dev_dbg(cdns->dev, "sizes pdi0 %d pdi1 %d tx_bandwidth %d rx_bandwidth %d\n",
158 pdi0_buffer_size, pdi1_buffer_size, tx_dma_bandwidth, rx_dma_bandwidth);
159
160 ret = hda_sdw_bpt_open(cdns->dev->parent, /* PCI device */
161 sdw->instance, &sdw->bpt_ctx.bpt_tx_stream,
162 &sdw->bpt_ctx.dmab_tx_bdl, pdi0_buffer_size, tx_dma_bandwidth,
163 &sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl,
164 pdi1_buffer_size, rx_dma_bandwidth);
165 if (ret < 0) {
166 dev_err(cdns->dev, "%s: hda_sdw_bpt_open failed %d\n", __func__, ret);
167 goto deprepare_stream;
168 }
169
170 if (!command) {
171 ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->addr, msg->buf,
172 msg->len, data_per_frame,
173 sdw->bpt_ctx.dmab_tx_bdl.area,
174 pdi0_buffer_size, &tx_total_bytes);
175 } else {
176 ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->addr, msg->len,
177 data_per_frame,
178 sdw->bpt_ctx.dmab_tx_bdl.area,
179 pdi0_buffer_size, &tx_total_bytes);
180 }
181
182 if (!ret)
183 return 0;
184
185 dev_err(cdns->dev, "%s: sdw_prepare_%s_dma_buffer failed %d\n",
186 __func__, command ? "read" : "write", ret);
187
188 ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */
189 sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl,
190 sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl);
191 if (ret1 < 0)
192 dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
193 __func__, ret1);
194
195 deprepare_stream:
196 sdw_deprepare_stream(cdns->bus.bpt_stream);
197
198 remove_master:
199 ret1 = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
200 if (ret1 < 0)
201 dev_err(cdns->dev, "%s: remove master failed: %d\n",
202 __func__, ret1);
203
204 remove_slave:
205 ret1 = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
206 if (ret1 < 0)
207 dev_err(cdns->dev, "%s: remove slave failed: %d\n",
208 __func__, ret1);
209
210 release_stream:
211 sdw_release_stream(cdns->bus.bpt_stream);
212 cdns->bus.bpt_stream = NULL;
213
214 return ret;
215 }
216
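/*
 * Tear down the BPT stream set up in intel_ace2x_bpt_open_stream(): close the
 * HDaudio DMA channels, deprepare the stream and remove the manager and
 * peripheral before clearing bus->bpt_stream.
 */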
217 static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
218 struct sdw_bpt_msg *msg)
219 {
220 struct sdw_cdns *cdns = &sdw->cdns;
221 int ret;
222
223 ret = hda_sdw_bpt_close(cdns->dev->parent /* PCI device */, sdw->bpt_ctx.bpt_tx_stream,
224 &sdw->bpt_ctx.dmab_tx_bdl, sdw->bpt_ctx.bpt_rx_stream,
225 &sdw->bpt_ctx.dmab_rx_bdl);
226 if (ret < 0)
227 dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
228 __func__, ret);
229
230 ret = sdw_deprepare_stream(cdns->bus.bpt_stream);
231 if (ret < 0)
232 dev_err(cdns->dev, "%s: sdw_deprepare_stream failed: ret %d\n",
233 __func__, ret);
234
235 ret = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
236 if (ret < 0)
237 dev_err(cdns->dev, "%s: remove master failed: %d\n",
238 __func__, ret);
239
240 ret = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
241 if (ret < 0)
242 dev_err(cdns->dev, "%s: remove slave failed: %d\n",
243 __func__, ret);
244
245 cdns->bus.bpt_stream = NULL;
246 }
247
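/* BPT message lengths must be a multiple of this value (checked in intel_ace2x_bpt_send_async()) */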
248 #define INTEL_BPT_MSG_BYTE_ALIGNMENT 32
249
250 static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *slave,
251 struct sdw_bpt_msg *msg)
252 {
253 struct sdw_cdns *cdns = &sdw->cdns;
254 int ret;
255
256 if (msg->len % INTEL_BPT_MSG_BYTE_ALIGNMENT) {
257 dev_err(cdns->dev, "BPT message length %d is not a multiple of %d bytes\n",
258 msg->len, INTEL_BPT_MSG_BYTE_ALIGNMENT);
259 return -EINVAL;
260 }
261
262 dev_dbg(cdns->dev, "BPT Transfer start\n");
263
264 ret = intel_ace2x_bpt_open_stream(sdw, slave, msg);
265 if (ret < 0)
266 return ret;
267
268 ret = hda_sdw_bpt_send_async(cdns->dev->parent, /* PCI device */
269 sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
270 if (ret < 0) {
271 dev_err(cdns->dev, "%s: hda_sdw_bpt_send_async failed: %d\n",
272 __func__, ret);
273
274 intel_ace2x_bpt_close_stream(sdw, slave, msg);
275
276 return ret;
277 }
278
279 ret = sdw_enable_stream(cdns->bus.bpt_stream);
280 if (ret < 0) {
281 dev_err(cdns->dev, "%s: sdw_enable_stream failed: %d\n",
282 __func__, ret);
283 intel_ace2x_bpt_close_stream(sdw, slave, msg);
284 }
285
286 return ret;
287 }
288
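/*
 * Wait for the HDaudio DMA transfers to complete, disable the stream, then
 * parse the PDI1 (RX) buffer: write status for write messages, read data and
 * status for read messages. The stream is closed in all cases.
 */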
289 static int intel_ace2x_bpt_wait(struct sdw_intel *sdw, struct sdw_slave *slave,
290 struct sdw_bpt_msg *msg)
291 {
292 struct sdw_cdns *cdns = &sdw->cdns;
293 int ret;
294
295 dev_dbg(cdns->dev, "BPT Transfer wait\n");
296
297 ret = hda_sdw_bpt_wait(cdns->dev->parent, /* PCI device */
298 sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
299 if (ret < 0)
300 dev_err(cdns->dev, "%s: hda_sdw_bpt_wait failed: %d\n", __func__, ret);
301
302 ret = sdw_disable_stream(cdns->bus.bpt_stream);
303 if (ret < 0) {
304 dev_err(cdns->dev, "%s: sdw_disable_stream failed: %d\n",
305 __func__, ret);
306 goto err;
307 }
308
309 if (msg->flags & SDW_MSG_FLAG_WRITE) {
310 ret = sdw_cdns_check_write_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
311 sdw->bpt_ctx.pdi1_buffer_size,
312 sdw->bpt_ctx.num_frames);
313 if (ret < 0)
314 dev_err(cdns->dev, "%s: BPT Write failed %d\n", __func__, ret);
315 } else {
316 ret = sdw_cdns_check_read_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
317 sdw->bpt_ctx.pdi1_buffer_size,
318 msg->buf, msg->len, sdw->bpt_ctx.num_frames,
319 sdw->bpt_ctx.data_per_frame);
320 if (ret < 0)
321 dev_err(cdns->dev, "%s: BPT Read failed %d\n", __func__, ret);
322 }
323
324 err:
325 intel_ace2x_bpt_close_stream(sdw, slave, msg);
326
327 return ret;
328 }
329
330 /*
331 * shim vendor-specific (vs) ops
332 */
333
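/*
 * Apply the ACTMCTL overrides provided through the vendor-specific bus
 * properties (clde, doaise, dods, ...) and set the DACTQE bit.
 */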
334 static void intel_shim_vs_init(struct sdw_intel *sdw)
335 {
336 void __iomem *shim_vs = sdw->link_res->shim_vs;
337 struct sdw_bus *bus = &sdw->cdns.bus;
338 struct sdw_intel_prop *intel_prop;
339 u16 clde;
340 u16 doaise2;
341 u16 dodse2;
342 u16 clds;
343 u16 clss;
344 u16 doaise;
345 u16 doais;
346 u16 dodse;
347 u16 dods;
348 u16 act;
349
350 intel_prop = bus->vendor_specific_prop;
351 clde = intel_prop->clde;
352 doaise2 = intel_prop->doaise2;
353 dodse2 = intel_prop->dodse2;
354 clds = intel_prop->clds;
355 clss = intel_prop->clss;
356 doaise = intel_prop->doaise;
357 doais = intel_prop->doais;
358 dodse = intel_prop->dodse;
359 dods = intel_prop->dods;
360
361 act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
362 u16p_replace_bits(&act, clde, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE);
363 u16p_replace_bits(&act, doaise2, SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2);
364 u16p_replace_bits(&act, dodse2, SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2);
365 u16p_replace_bits(&act, clds, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS);
366 u16p_replace_bits(&act, clss, SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS);
367 u16p_replace_bits(&act, doaise, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE);
368 u16p_replace_bits(&act, doais, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
369 u16p_replace_bits(&act, dodse, SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE);
370 u16p_replace_bits(&act, dods, SDW_SHIM2_INTEL_VS_ACTMCTL_DODS);
371 act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
372 intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL, act);
373 usleep_range(10, 15);
374 }
375
376 static void intel_shim_vs_set_clock_source(struct sdw_intel *sdw, u32 source)
377 {
378 void __iomem *shim_vs = sdw->link_res->shim_vs;
379 u32 val;
380
381 val = intel_readl(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL);
382
383 u32p_replace_bits(&val, source, SDW_SHIM2_INTEL_VS_LVSCTL_MLCS);
384
385 intel_writel(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL, val);
386
387 dev_dbg(sdw->cdns.dev, "clock source %d LVSCTL %#x\n", source, val);
388 }
389
390 static int intel_shim_check_wake(struct sdw_intel *sdw)
391 {
392 /*
393 * We follow the HDaudio example and resume unconditionally
394 * without checking the WAKESTS bit for that specific link
395 */
396
397 return 1;
398 }
399
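/*
 * Wake handling reuses the HDaudio WAKEEN/STATESTS registers: the LSDIID
 * value of this link selects which SDI bits to enable or clear.
 */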
400 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
401 {
402 u16 lsdiid = 0;
403 u16 wake_en;
404 u16 wake_sts;
405 int ret;
406
407 mutex_lock(sdw->link_res->shim_lock);
408
409 ret = hdac_bus_eml_sdw_get_lsdiid_unlocked(sdw->link_res->hbus, sdw->instance, &lsdiid);
410 if (ret < 0)
411 goto unlock;
412
413 wake_en = snd_hdac_chip_readw(sdw->link_res->hbus, WAKEEN);
414
415 if (wake_enable) {
416 /* Enable the wakeup */
417 wake_en |= lsdiid;
418
419 snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);
420 } else {
421 /* Disable the wake up interrupt */
422 wake_en &= ~lsdiid;
423 snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);
424
425 /* Clear wake status (W1C) */
426 wake_sts = snd_hdac_chip_readw(sdw->link_res->hbus, STATESTS);
427 wake_sts |= lsdiid;
428 snd_hdac_chip_writew(sdw->link_res->hbus, STATESTS, wake_sts);
429 }
430 unlock:
431 mutex_unlock(sdw->link_res->shim_lock);
432 }
433
434 static int intel_link_power_up(struct sdw_intel *sdw)
435 {
436 struct sdw_bus *bus = &sdw->cdns.bus;
437 struct sdw_master_prop *prop = &bus->prop;
438 u32 *shim_mask = sdw->link_res->shim_mask;
439 unsigned int link_id = sdw->instance;
440 u32 clock_source;
441 u32 syncprd;
442 int ret;
443
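/*
 * Pick SYNCPRD and the link clock source from the mclk frequency:
 * multiples of 6 MHz use the 96 MHz audio PLL, other multiples of
 * 2.4 MHz use the 38.4 MHz XTAL, anything else falls back to the
 * 24.576 MHz cardinal clock.
 */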
444 if (prop->mclk_freq % 6000000) {
445 if (prop->mclk_freq % 2400000) {
446 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
447 clock_source = SDW_SHIM2_MLCS_CARDINAL_CLK;
448 } else {
449 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
450 clock_source = SDW_SHIM2_MLCS_XTAL_CLK;
451 }
452 } else {
453 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
454 clock_source = SDW_SHIM2_MLCS_AUDIO_PLL_CLK;
455 }
456
457 mutex_lock(sdw->link_res->shim_lock);
458
459 ret = hdac_bus_eml_sdw_power_up_unlocked(sdw->link_res->hbus, link_id);
460 if (ret < 0) {
461 dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_up failed: %d\n",
462 __func__, ret);
463 goto out;
464 }
465
466 intel_shim_vs_set_clock_source(sdw, clock_source);
467
468 if (!*shim_mask) {
469 /* we first need to program the SyncPRD/CPU registers */
470 dev_dbg(sdw->cdns.dev, "first link up, programming SYNCPRD\n");
471
472 ret = hdac_bus_eml_sdw_set_syncprd_unlocked(sdw->link_res->hbus, syncprd);
473 if (ret < 0) {
474 dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_set_syncprd failed: %d\n",
475 __func__, ret);
476 goto out;
477 }
478
479 /* SYNCPU will change once link is active */
480 ret = hdac_bus_eml_sdw_wait_syncpu_unlocked(sdw->link_res->hbus);
481 if (ret < 0) {
482 dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_wait_syncpu failed: %d\n",
483 __func__, ret);
484 goto out;
485 }
486
487 hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
488 AZX_REG_ML_LEPTR_ID_SDW, true);
489 }
490
491 *shim_mask |= BIT(link_id);
492
493 sdw->cdns.link_up = true;
494
495 intel_shim_vs_init(sdw);
496
497 out:
498 mutex_unlock(sdw->link_res->shim_lock);
499
500 return ret;
501 }
502
503 static int intel_link_power_down(struct sdw_intel *sdw)
504 {
505 u32 *shim_mask = sdw->link_res->shim_mask;
506 unsigned int link_id = sdw->instance;
507 int ret;
508
509 mutex_lock(sdw->link_res->shim_lock);
510
511 sdw->cdns.link_up = false;
512
513 *shim_mask &= ~BIT(link_id);
514
515 if (!*shim_mask)
516 hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
517 AZX_REG_ML_LEPTR_ID_SDW, false);
518
519 ret = hdac_bus_eml_sdw_power_down_unlocked(sdw->link_res->hbus, link_id);
520 if (ret < 0) {
521 dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_down failed: %d\n",
522 __func__, ret);
523
524 /*
525 * we leave the sdw->cdns.link_up flag as false since we've disabled
526 * the link at this point and cannot handle interrupts any longer.
527 */
528 }
529
530 mutex_unlock(sdw->link_res->shim_lock);
531
532 return ret;
533 }
534
535 static void intel_sync_arm(struct sdw_intel *sdw)
536 {
537 unsigned int link_id = sdw->instance;
538
539 mutex_lock(sdw->link_res->shim_lock);
540
541 hdac_bus_eml_sdw_sync_arm_unlocked(sdw->link_res->hbus, link_id);
542
543 mutex_unlock(sdw->link_res->shim_lock);
544 }
545
546 static int intel_sync_go_unlocked(struct sdw_intel *sdw)
547 {
548 int ret;
549
550 ret = hdac_bus_eml_sdw_sync_go_unlocked(sdw->link_res->hbus);
551 if (ret < 0)
552 dev_err(sdw->cdns.dev, "%s: SyncGO clear failed: %d\n", __func__, ret);
553
554 return ret;
555 }
556
557 static int intel_sync_go(struct sdw_intel *sdw)
558 {
559 int ret;
560
561 mutex_lock(sdw->link_res->shim_lock);
562
563 ret = intel_sync_go_unlocked(sdw);
564
565 mutex_unlock(sdw->link_res->shim_lock);
566
567 return ret;
568 }
569
570 static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
571 {
572 return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
573 }
574
575 /* DAI callbacks */
576 static int intel_params_stream(struct sdw_intel *sdw,
577 struct snd_pcm_substream *substream,
578 struct snd_soc_dai *dai,
579 struct snd_pcm_hw_params *hw_params,
580 int link_id, int alh_stream_id)
581 {
582 struct sdw_intel_link_res *res = sdw->link_res;
583 struct sdw_intel_stream_params_data params_data;
584
585 params_data.substream = substream;
586 params_data.dai = dai;
587 params_data.hw_params = hw_params;
588 params_data.link_id = link_id;
589 params_data.alh_stream_id = alh_stream_id;
590
591 if (res->ops && res->ops->params_stream && res->dev)
592 return res->ops->params_stream(res->dev,
593 &params_data);
594 return -EIO;
595 }
596
597 static int intel_free_stream(struct sdw_intel *sdw,
598 struct snd_pcm_substream *substream,
599 struct snd_soc_dai *dai,
600 int link_id)
602 {
603 struct sdw_intel_link_res *res = sdw->link_res;
604 struct sdw_intel_stream_free_data free_data;
605
606 free_data.substream = substream;
607 free_data.dai = dai;
608 free_data.link_id = link_id;
609
610 if (res->ops && res->ops->free_stream && res->dev)
611 return res->ops->free_stream(res->dev,
612 &free_data);
613
614 return 0;
615 }
616
617 /*
618 * DAI operations
619 */
620 static int intel_hw_params(struct snd_pcm_substream *substream,
621 struct snd_pcm_hw_params *params,
622 struct snd_soc_dai *dai)
623 {
624 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
625 struct sdw_intel *sdw = cdns_to_intel(cdns);
626 struct sdw_cdns_dai_runtime *dai_runtime;
627 struct sdw_cdns_pdi *pdi;
628 struct sdw_stream_config sconfig;
629 int ch, dir;
630 int ret;
631
632 dai_runtime = cdns->dai_runtime_array[dai->id];
633 if (!dai_runtime)
634 return -EIO;
635
636 ch = params_channels(params);
637 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
638 dir = SDW_DATA_DIR_RX;
639 else
640 dir = SDW_DATA_DIR_TX;
641
642 pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
643 if (!pdi)
644 return -EINVAL;
645
646 /* use same definitions for alh_id as previous generations */
647 pdi->intel_alh_id = (sdw->instance * 16) + pdi->num + 3;
648 if (pdi->num >= 2)
649 pdi->intel_alh_id += 2;
650
651 /* the SHIM will be configured in the callback functions */
652
653 sdw_cdns_config_stream(cdns, ch, dir, pdi);
654
655 /* store pdi and state, may be needed in prepare step */
656 dai_runtime->paused = false;
657 dai_runtime->suspended = false;
658 dai_runtime->pdi = pdi;
659
660 /* Inform DSP about PDI stream number */
661 ret = intel_params_stream(sdw, substream, dai, params,
662 sdw->instance,
663 pdi->intel_alh_id);
664 if (ret)
665 return ret;
666
667 sconfig.direction = dir;
668 sconfig.ch_count = ch;
669 sconfig.frame_rate = params_rate(params);
670 sconfig.type = dai_runtime->stream_type;
671
672 sconfig.bps = snd_pcm_format_width(params_format(params));
673
674 /* Port configuration */
675 struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
676 GFP_KERNEL);
677 if (!pconfig)
678 return -ENOMEM;
679
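/* enable the lowest 'ch' channels on the port */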
680 pconfig->num = pdi->num;
681 pconfig->ch_mask = (1 << ch) - 1;
682
683 ret = sdw_stream_add_master(&cdns->bus, &sconfig,
684 pconfig, 1, dai_runtime->stream);
685 if (ret)
686 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
687
688 return ret;
689 }
690
691 static int intel_prepare(struct snd_pcm_substream *substream,
692 struct snd_soc_dai *dai)
693 {
694 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
695 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
696 struct sdw_intel *sdw = cdns_to_intel(cdns);
697 struct sdw_cdns_dai_runtime *dai_runtime;
698 struct snd_pcm_hw_params *hw_params;
699 int ch, dir;
700
701 dai_runtime = cdns->dai_runtime_array[dai->id];
702 if (!dai_runtime) {
703 dev_err(dai->dev, "failed to get dai runtime in %s\n",
704 __func__);
705 return -EIO;
706 }
707
708 hw_params = &rtd->dpcm[substream->stream].hw_params;
709 if (dai_runtime->suspended) {
710 dai_runtime->suspended = false;
711
712 /*
713 * .prepare() is called after system resume, where we
714 * need to reinitialize the SHIM/ALH/Cadence IP.
715 * .prepare() is also called to deal with underflows,
716 * but in those cases we cannot touch ALH/SHIM
717 * registers
718 */
719
720 /* configure stream */
721 ch = params_channels(hw_params);
722 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
723 dir = SDW_DATA_DIR_RX;
724 else
725 dir = SDW_DATA_DIR_TX;
726
727 /* the SHIM will be configured in the callback functions */
728
729 sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
730 }
731
732 /* Inform DSP about PDI stream number */
733 return intel_params_stream(sdw, substream, dai, hw_params, sdw->instance,
734 dai_runtime->pdi->intel_alh_id);
735 }
736
737 static int
738 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
739 {
740 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
741 struct sdw_intel *sdw = cdns_to_intel(cdns);
742 struct sdw_cdns_dai_runtime *dai_runtime;
743 int ret;
744
745 dai_runtime = cdns->dai_runtime_array[dai->id];
746 if (!dai_runtime)
747 return -EIO;
748
749 /*
750 * The sdw stream state will transition to RELEASED when stream->
751 * master_list is empty. So the stream state will transition to
752 * DEPREPARED for the first cpu-dai and to RELEASED for the last
753 * cpu-dai.
754 */
755 ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
756 if (ret < 0) {
757 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
758 dai_runtime->stream->name, ret);
759 return ret;
760 }
761
762 ret = intel_free_stream(sdw, substream, dai, sdw->instance);
763 if (ret < 0) {
764 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
765 return ret;
766 }
767
768 dai_runtime->pdi = NULL;
769
770 return 0;
771 }
772
773 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
774 void *stream, int direction)
775 {
776 return cdns_set_sdw_stream(dai, stream, direction);
777 }
778
779 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
780 int direction)
781 {
782 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
783 struct sdw_cdns_dai_runtime *dai_runtime;
784
785 dai_runtime = cdns->dai_runtime_array[dai->id];
786 if (!dai_runtime)
787 return ERR_PTR(-EINVAL);
788
789 return dai_runtime->stream;
790 }
791
792 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
793 {
794 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
795 struct sdw_intel *sdw = cdns_to_intel(cdns);
796 struct sdw_intel_link_res *res = sdw->link_res;
797 struct sdw_cdns_dai_runtime *dai_runtime;
798 int ret = 0;
799
800 /*
801 * The .trigger callback is used to program HDaudio DMA and send required IPC to audio
802 * firmware.
803 */
804 if (res->ops && res->ops->trigger) {
805 ret = res->ops->trigger(substream, cmd, dai);
806 if (ret < 0)
807 return ret;
808 }
809
810 dai_runtime = cdns->dai_runtime_array[dai->id];
811 if (!dai_runtime) {
812 dev_err(dai->dev, "failed to get dai runtime in %s\n",
813 __func__);
814 return -EIO;
815 }
816
817 switch (cmd) {
818 case SNDRV_PCM_TRIGGER_SUSPEND:
819
820 /*
821 * The .prepare callback is used to deal with xruns and resume operations.
822 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
823 * but for resume operations the DMAs and SHIM registers need to be initialized.
824 * The .trigger callback is used to track the suspend case only.
825 */
826
827 dai_runtime->suspended = true;
828
829 break;
830
831 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
832 dai_runtime->paused = true;
833 break;
834 case SNDRV_PCM_TRIGGER_STOP:
835 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
836 dai_runtime->paused = false;
837 break;
838 default:
839 break;
840 }
841
842 return ret;
843 }
844
845 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
846 .hw_params = intel_hw_params,
847 .prepare = intel_prepare,
848 .hw_free = intel_hw_free,
849 .trigger = intel_trigger,
850 .set_stream = intel_pcm_set_sdw_stream,
851 .get_stream = intel_get_sdw_stream,
852 };
853
854 static const struct snd_soc_component_driver dai_component = {
855 .name = "soundwire",
856 };
857
858 /*
859 * PDI routines
860 */
861 static void intel_pdi_init(struct sdw_intel *sdw,
862 struct sdw_cdns_stream_config *config)
863 {
864 void __iomem *shim = sdw->link_res->shim;
865 int pcm_cap;
866
867 /* PCM Stream Capability */
868 pcm_cap = intel_readw(shim, SDW_SHIM2_PCMSCAP);
869
870 config->pcm_bd = FIELD_GET(SDW_SHIM2_PCMSCAP_BSS, pcm_cap);
871 config->pcm_in = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
872 config->pcm_out = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
873
874 dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
875 config->pcm_bd, config->pcm_in, config->pcm_out);
876 }
877
878 static int
879 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
880 {
881 void __iomem *shim = sdw->link_res->shim;
882
883 /* zero based values for channel count in register */
884 return intel_readw(shim, SDW_SHIM2_PCMSYCHC(pdi_num)) + 1;
885 }
886
887 static void intel_pdi_get_ch_update(struct sdw_intel *sdw,
888 struct sdw_cdns_pdi *pdi,
889 unsigned int num_pdi,
890 unsigned int *num_ch)
891 {
892 int ch_count = 0;
893 int i;
894
895 for (i = 0; i < num_pdi; i++) {
896 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
897 ch_count += pdi->ch_count;
898 pdi++;
899 }
900
901 *num_ch = ch_count;
902 }
903
904 static void intel_pdi_stream_ch_update(struct sdw_intel *sdw,
905 struct sdw_cdns_streams *stream)
906 {
907 intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
908 &stream->num_ch_bd);
909
910 intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
911 &stream->num_ch_in);
912
913 intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
914 &stream->num_ch_out);
915 }
916
917 static int intel_create_dai(struct sdw_cdns *cdns,
918 struct snd_soc_dai_driver *dais,
919 enum intel_pdi_type type,
920 u32 num, u32 off, u32 max_ch)
921 {
922 int i;
923
924 if (!num)
925 return 0;
926
927 for (i = off; i < (off + num); i++) {
928 dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
929 "SDW%d Pin%d",
930 cdns->instance, i);
931 if (!dais[i].name)
932 return -ENOMEM;
933
934 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
935 dais[i].playback.channels_min = 1;
936 dais[i].playback.channels_max = max_ch;
937 }
938
939 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
940 dais[i].capture.channels_min = 1;
941 dais[i].capture.channels_max = max_ch;
942 }
943
944 dais[i].ops = &intel_pcm_dai_ops;
945 }
946
947 return 0;
948 }
949
950 static int intel_register_dai(struct sdw_intel *sdw)
951 {
952 struct sdw_cdns_dai_runtime **dai_runtime_array;
953 struct sdw_cdns_stream_config config;
954 struct sdw_cdns *cdns = &sdw->cdns;
955 struct sdw_cdns_streams *stream;
956 struct snd_soc_dai_driver *dais;
957 int num_dai;
958 int ret;
959 int off = 0;
960
961 /* Read the PDI config and initialize cadence PDI */
962 intel_pdi_init(sdw, &config);
963 ret = sdw_cdns_pdi_init(cdns, config);
964 if (ret)
965 return ret;
966
967 intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
968
969 /* DAIs are created based on total number of PDIs supported */
970 num_dai = cdns->pcm.num_pdi;
971
972 dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
973 sizeof(struct sdw_cdns_dai_runtime *),
974 GFP_KERNEL);
975 if (!dai_runtime_array)
976 return -ENOMEM;
977 cdns->dai_runtime_array = dai_runtime_array;
978
979 dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
980 if (!dais)
981 return -ENOMEM;
982
983 /* Create PCM DAIs */
984 stream = &cdns->pcm;
985
986 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
987 off, stream->num_ch_in);
988 if (ret)
989 return ret;
990
991 off += cdns->pcm.num_in;
992 ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
993 off, stream->num_ch_out);
994 if (ret)
995 return ret;
996
997 off += cdns->pcm.num_out;
998 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
999 off, stream->num_ch_bd);
1000 if (ret)
1001 return ret;
1002
1003 return devm_snd_soc_register_component(cdns->dev, &dai_component,
1004 dais, num_dai);
1005 }
1006
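/*
 * Record the peripheral device number in the link LSDIID register; the
 * resulting mask is what intel_shim_wake() uses for WAKEEN/STATESTS.
 */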
1007 static void intel_program_sdi(struct sdw_intel *sdw, int dev_num)
1008 {
1009 int ret;
1010
1011 ret = hdac_bus_eml_sdw_set_lsdiid(sdw->link_res->hbus, sdw->instance, dev_num);
1012 if (ret < 0)
1013 dev_err(sdw->cdns.dev, "%s: could not set lsdiid for link %d %d\n",
1014 __func__, sdw->instance, dev_num);
1015 }
1016
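/* Number of SoundWire sublinks reported by the HDaudio extended multi-link capability */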
1017 static int intel_get_link_count(struct sdw_intel *sdw)
1018 {
1019 int ret;
1020
1021 ret = hdac_bus_eml_get_count(sdw->link_res->hbus, true, AZX_REG_ML_LEPTR_ID_SDW);
1022 if (!ret) {
1023 dev_err(sdw->cdns.dev, "%s: could not retrieve link count\n", __func__);
1024 return -ENODEV;
1025 }
1026
1027 if (ret > SDW_INTEL_MAX_LINKS) {
1028 dev_err(sdw->cdns.dev, "%s: link count %d exceeds max %d\n", __func__, ret, SDW_INTEL_MAX_LINKS);
1029 return -EINVAL;
1030 }
1031
1032 return ret;
1033 }
1034
1035 const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
1036 .debugfs_init = intel_ace2x_debugfs_init,
1037 .debugfs_exit = intel_ace2x_debugfs_exit,
1038
1039 .get_link_count = intel_get_link_count,
1040
1041 .register_dai = intel_register_dai,
1042
1043 .check_clock_stop = intel_check_clock_stop,
1044 .start_bus = intel_start_bus,
1045 .start_bus_after_reset = intel_start_bus_after_reset,
1046 .start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1047 .stop_bus = intel_stop_bus,
1048
1049 .link_power_up = intel_link_power_up,
1050 .link_power_down = intel_link_power_down,
1051
1052 .shim_check_wake = intel_shim_check_wake,
1053 .shim_wake = intel_shim_wake,
1054
1055 .pre_bank_switch = intel_pre_bank_switch,
1056 .post_bank_switch = intel_post_bank_switch,
1057
1058 .sync_arm = intel_sync_arm,
1059 .sync_go_unlocked = intel_sync_go_unlocked,
1060 .sync_go = intel_sync_go,
1061 .sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1062
1063 .program_sdi = intel_program_sdi,
1064
1065 .bpt_send_async = intel_ace2x_bpt_send_async,
1066 .bpt_wait = intel_ace2x_bpt_wait,
1067 };
1068 EXPORT_SYMBOL_NS(sdw_intel_lnl_hw_ops, "SOUNDWIRE_INTEL");
1069
1070 MODULE_IMPORT_NS("SND_SOC_SOF_HDA_MLINK");
1071 MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_SDW_BPT");
1072