xref: /linux/sound/soc/sof/amd/acp.c (revision c93913c70809898aa5e450e4aad0b99750d9f082)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license. When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
7 //
8 // Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
9 //	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
10 
11 /*
12  * Hardware interface for generic AMD ACP processor
13  */
14 
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 
19 #include <asm/amd/node.h>
20 
21 #include "../ops.h"
22 #include "acp.h"
23 #include "acp-dsp-offset.h"
24 
/* Module option (read-only at runtime): ask the firmware to enable its debug facilities */
static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");

/*
 * Valve Galileo (Steam Deck OLED) ships a PSP-signed firmware image and
 * needs the IRAM/DRAM resize skipped plus a delay after firmware start.
 */
static struct acp_quirk_entry quirk_valve_galileo = {
	.signed_fw_image = true,
	.skip_iram_dram_size_mod = true,
	.post_fw_run_delay = true,
};

/* DMI match table consulted at probe time to attach platform quirks */
const struct dmi_system_id acp_sof_quirk_table[] = {
	{
		/* Steam Deck OLED device */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
		},
		.driver_data = &quirk_valve_galileo,
	},
	{}
};
EXPORT_SYMBOL_GPL(acp_sof_quirk_table);
47 
init_dma_descriptor(struct acp_dev_data * adata)48 static void init_dma_descriptor(struct acp_dev_data *adata)
49 {
50 	struct snd_sof_dev *sdev = adata->dev;
51 	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
52 	struct acp_dev_data *acp_data = sdev->pdata->hw_pdata;
53 	unsigned int addr;
54 	unsigned int acp_dma_desc_base_addr, acp_dma_desc_max_num_dscr;
55 
56 	addr = desc->sram_pte_offset + sdev->debug_box.offset +
57 	       offsetof(struct scratch_reg_conf, dma_desc);
58 
59 	switch (acp_data->pci_rev) {
60 	case ACP70_PCI_ID:
61 	case ACP71_PCI_ID:
62 	case ACP72_PCI_ID:
63 		acp_dma_desc_base_addr = ACP70_DMA_DESC_BASE_ADDR;
64 		acp_dma_desc_max_num_dscr = ACP70_DMA_DESC_MAX_NUM_DSCR;
65 		break;
66 	default:
67 		acp_dma_desc_base_addr = ACP_DMA_DESC_BASE_ADDR;
68 		acp_dma_desc_max_num_dscr = ACP_DMA_DESC_MAX_NUM_DSCR;
69 	}
70 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_desc_base_addr, addr);
71 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_desc_max_num_dscr, ACP_MAX_DESC_CNT);
72 }
73 
configure_dma_descriptor(struct acp_dev_data * adata,unsigned short idx,struct dma_descriptor * dscr_info)74 static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
75 				     struct dma_descriptor *dscr_info)
76 {
77 	struct snd_sof_dev *sdev = adata->dev;
78 	unsigned int offset;
79 
80 	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
81 		offsetof(struct scratch_reg_conf, dma_desc) +
82 		idx * sizeof(struct dma_descriptor);
83 
84 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
85 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
86 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
87 }
88 
/*
 * config_dma_channel - reset one ACP DMA channel and start it
 * @adata: acp device data
 * @ch: DMA channel number
 * @idx: index of the first descriptor for this channel
 * @dscr_count: number of descriptors the channel should process
 *
 * Return: 0 on success, negative error code if the channel reset times out.
 */
static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	struct acp_dev_data *acp_data = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, status;
	unsigned int acp_dma_cntl_0, acp_dma_ch_rst_sts, acp_dma_dscr_err_sts_0;
	unsigned int acp_dma_dscr_cnt_0, acp_dma_prio_0, acp_dma_dscr_strt_idx_0;
	int ret;

	/* ACP 7.x parts use a shifted DMA register map */
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		acp_dma_cntl_0 = ACP70_DMA_CNTL_0;
		acp_dma_ch_rst_sts = ACP70_DMA_CH_RST_STS;
		acp_dma_dscr_err_sts_0 = ACP70_DMA_ERR_STS_0;
		acp_dma_dscr_cnt_0 = ACP70_DMA_DSCR_CNT_0;
		acp_dma_prio_0 = ACP70_DMA_PRIO_0;
		acp_dma_dscr_strt_idx_0 = ACP70_DMA_DSCR_STRT_IDX_0;
		break;
	default:
		acp_dma_cntl_0 = ACP_DMA_CNTL_0;
		acp_dma_ch_rst_sts = ACP_DMA_CH_RST_STS;
		acp_dma_dscr_err_sts_0 = ACP_DMA_ERR_STS_0;
		acp_dma_dscr_cnt_0 = ACP_DMA_DSCR_CNT_0;
		acp_dma_prio_0 = ACP_DMA_PRIO_0;
		acp_dma_dscr_strt_idx_0 = ACP_DMA_DSCR_STRT_IDX_0;
	}

	/* Request a graceful reset of the channel... */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_cntl_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	/* ...and wait for the per-channel reset-done bit to assert */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, acp_dma_ch_rst_sts, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		/* Dump the per-channel DMA error and global ACP error status */
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, acp_dma_dscr_err_sts_0 +
				       ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
		return ret;
	}

	/* Deassert reset, program descriptor count/start index and priority, then run */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (acp_dma_cntl_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_dscr_cnt_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_dscr_strt_idx_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_prio_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_cntl_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}
143 
acpbus_dma_start(struct acp_dev_data * adata,unsigned int ch,unsigned int dscr_count,struct dma_descriptor * dscr_info)144 static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
145 			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
146 {
147 	struct snd_sof_dev *sdev = adata->dev;
148 	int ret;
149 	u16 dscr;
150 
151 	if (!dscr_info || !dscr_count)
152 		return -EINVAL;
153 
154 	for (dscr = 0; dscr < dscr_count; dscr++)
155 		configure_dma_descriptor(adata, dscr, dscr_info++);
156 
157 	ret = config_dma_channel(adata, ch, 0, dscr_count);
158 	if (ret < 0)
159 		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);
160 
161 	return ret;
162 }
163 
/*
 * configure_and_run_dma - split a transfer into page descriptors and run it
 * @adata: acp device data
 * @src_addr: ACP-visible source address
 * @dest_addr: ACP-visible destination address
 * @dsp_data_size: number of bytes to transfer
 *
 * Builds up to ACP_MAX_DESC descriptors of ACP_PAGE_SIZE bytes each (the
 * last one carries the remainder) and starts DMA channel 0.
 *
 * Return: 0 on success, negative error code on failure.
 */
int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	/*
	 * The >= 0 bound gives the final partial page its own descriptor.
	 * NOTE(review): when dsp_data_size is an exact multiple of
	 * ACP_PAGE_SIZE this also emits one trailing zero-length
	 * descriptor - confirm that is intended.
	 */
	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}
190 
191 /*
192  * psp_mbox_ready- function to poll ready bit of psp mbox
193  * @adata: acp device data
194  * @ack: bool variable to check ready bit status or psp ack
195  */
196 
psp_mbox_ready(struct acp_dev_data * adata,bool ack)197 static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
198 {
199 	struct snd_sof_dev *sdev = adata->dev;
200 	int ret, data;
201 
202 	ret = read_poll_timeout(smn_read_register, data, data > 0 && data & MBOX_READY_MASK,
203 				MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false, MP0_C2PMSG_114_REG);
204 
205 	if (!ret)
206 		return 0;
207 
208 	dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);
209 
210 	if (ack)
211 		return -ETIMEDOUT;
212 
213 	return -EBUSY;
214 }
215 
216 /*
217  * psp_send_cmd - function to send psp command over mbox
218  * @adata: acp device data
219  * @cmd: non zero integer value for command type
220  */
221 
psp_send_cmd(struct acp_dev_data * adata,int cmd)222 static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
223 {
224 	struct snd_sof_dev *sdev = adata->dev;
225 	int ret;
226 	u32 data;
227 
228 	if (!cmd)
229 		return -EINVAL;
230 
231 	/* Get a non-zero Doorbell value from PSP */
232 	ret = read_poll_timeout(smn_read_register, data, data > 0, MBOX_DELAY_US,
233 				ACP_PSP_TIMEOUT_US, false, MP0_C2PMSG_73_REG);
234 
235 	if (ret) {
236 		dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
237 		return ret;
238 	}
239 
240 	/* Check if PSP is ready for new command */
241 	ret = psp_mbox_ready(adata, 0);
242 	if (ret)
243 		return ret;
244 
245 	ret = amd_smn_write(0, MP0_C2PMSG_114_REG, cmd);
246 	if (ret)
247 		return ret;
248 
249 	/* Ring the Doorbell for PSP */
250 	ret = amd_smn_write(0, MP0_C2PMSG_73_REG, data);
251 	if (ret)
252 		return ret;
253 
254 	/* Check MBOX ready as PSP ack */
255 	ret = psp_mbox_ready(adata, 1);
256 
257 	return ret;
258 }
259 
/*
 * configure_and_run_sha_dma - stream a firmware image through the SHA DMA
 * @adata: acp device data
 * @image_addr: CPU address of the firmware image (used only for a NULL check)
 * @start_addr: ACP-visible source address of the image
 * @dest_addr: destination address in ACP memory
 * @image_length: image size in bytes
 *
 * The SHA DMA copies the image while hashing it, so the PSP can validate
 * the firmware before the DSP is allowed to run it.
 *
 * Return: 0 on success, negative error code on failure.
 */
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
			      unsigned int start_addr, unsigned int dest_addr,
			      unsigned int image_length)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int tx_count, fw_qualifier, val;
	int ret;

	if (!image_addr) {
		dev_err(sdev->dev, "SHA DMA image address is NULL\n");
		return -EINVAL;
	}

	/* If a previous SHA DMA run is still active, reset the engine first */
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
	if (val & ACP_SHA_RUN) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
						    val, val & ACP_SHA_RESET,
						    ACP_REG_POLL_INTERVAL,
						    ACP_REG_POLL_TIMEOUT_US);
		if (ret < 0) {
			dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
			return ret;
		}
	}

	/* Signed images carry a header that must be included in the hash */
	if (adata->quirks && adata->quirks->signed_fw_image)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);

	/* psp_send_cmd only required for vangogh platform */
	if (adata->pci_rev == ACP_VANGOGH_PCI_ID &&
	    !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
		/* Modify IRAM and DRAM size */
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
		if (ret)
			return ret;
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
		if (ret)
			return ret;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

	/* Wait until the engine reports the whole image has been transferred */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
					    tx_count, tx_count == image_length,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
		return ret;
	}

	/* psp_send_cmd only required for renoir platform*/
	if (adata->pci_rev == ACP_RN_PCI_ID) {
		ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
		if (ret)
			return ret;
	}

	/* The PSP sets the qualifier once it has validated the firmware hash */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_PSP_ACK);
		dev_err(sdev->dev, "PSP validation failed: fw_qualifier = %#x, ACP_SHA_PSP_ACK = %#x\n",
			fw_qualifier, val);
		return ret;
	}

	return 0;
}
333 
/*
 * acp_dma_status - wait for a running DMA channel to go idle
 * @adata: acp device data
 * @ch: DMA channel number
 *
 * Return: 0 if the channel is idle (or was not running), negative error
 * code if it does not drain within ACP_DMA_COMPLETE_TIMEOUT_US.
 */
int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	unsigned int acp_dma_ch_sts;
	int ret = 0;

	/* ACP 7.x parts use a different channel-status register offset */
	switch (adata->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		acp_dma_ch_sts = ACP70_DMA_CH_STS;
		break;
	default:
		acp_dma_ch_sts = ACP_DMA_CH_STS;
	}
	/*
	 * NOTE(review): this control-register read always uses the legacy
	 * ACP_DMA_CNTL_0 offset, even on ACP 7.x where the status offset is
	 * switched above - confirm that is intended.
	 */
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, acp_dma_ch_sts, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}
361 
/*
 * memcpy_from_scratch - copy data out of the ACP scratch registers
 * @sdev: SOF device
 * @offset: byte offset from ACP_SCRATCH_REG_0
 * @dst: destination buffer (32-bit words)
 * @bytes: number of bytes to copy; a trailing partial word is rounded
 *	   up to a full 32-bit read
 */
void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	size_t i, j;

	/* size_t counters avoid the signed/unsigned mismatch against @bytes */
	for (i = 0, j = 0; i < bytes; i += 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}
370 
/*
 * memcpy_to_scratch - copy data into the ACP scratch registers
 * @sdev: SOF device
 * @offset: byte offset from ACP_SCRATCH_REG_0
 * @src: source buffer (32-bit words)
 * @bytes: number of bytes to copy; a trailing partial word is rounded
 *	   up to a full 32-bit write
 */
void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	size_t i, j;

	/* size_t counters avoid the signed/unsigned mismatch against @bytes */
	for (i = 0, j = 0; i < bytes; i += 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}
379 
acp_memory_init(struct snd_sof_dev * sdev)380 static int acp_memory_init(struct snd_sof_dev *sdev)
381 {
382 	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
383 	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
384 
385 	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
386 				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
387 	init_dma_descriptor(adata);
388 
389 	return 0;
390 }
391 
/*
 * amd_sof_handle_acp70_sdw_wake_event - resume SoundWire managers after a wake
 * @adata: acp device data
 *
 * For each pending wake event, request a runtime resume of the matching
 * SoundWire manager and clear the pending flag.
 */
static void amd_sof_handle_acp70_sdw_wake_event(struct acp_dev_data *adata)
{
	struct amd_sdw_manager *amd_manager;

	if (adata->acp70_sdw0_wake_event) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
		if (amd_manager)
			pm_request_resume(amd_manager->dev);
		adata->acp70_sdw0_wake_event = 0;
	}

	if (adata->acp70_sdw1_wake_event) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
		if (amd_manager)
			pm_request_resume(amd_manager->dev);
		adata->acp70_sdw1_wake_event = 0;
	}
}
410 
/*
 * amd_sof_check_and_handle_acp70_sdw_wake_irq - service SoundWire wake IRQs
 * @sdev: SOF device
 *
 * Checks EXTERNAL_INTR_STAT1 for SoundWire host-wake and PME events on
 * both managers, acknowledges them, and resumes the affected managers.
 *
 * Return: 1 if any wake event was handled, 0 otherwise.
 */
static int amd_sof_check_and_handle_acp70_sdw_wake_irq(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	u32 ext_intr_stat1;
	int irq_flag = 0;
	bool sdw_wake_irq = false;

	/* Host-wake events: ack the status bit and mark the manager pending */
	ext_intr_stat1 = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
	if (ext_intr_stat1 & ACP70_SDW0_HOST_WAKE_STAT) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
				  ACP70_SDW0_HOST_WAKE_STAT);
		adata->acp70_sdw0_wake_event = true;
		sdw_wake_irq = true;
	}

	if (ext_intr_stat1 & ACP70_SDW1_HOST_WAKE_STAT) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
				  ACP70_SDW1_HOST_WAKE_STAT);
		adata->acp70_sdw1_wake_event = true;
		sdw_wake_irq = true;
	}

	/* PME events: additionally disable the manager's wake enable bit */
	if (ext_intr_stat1 & ACP70_SDW0_PME_STAT) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_SW0_WAKE_EN, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1, ACP70_SDW0_PME_STAT);
		adata->acp70_sdw0_wake_event = true;
		sdw_wake_irq = true;
	}

	if (ext_intr_stat1 & ACP70_SDW1_PME_STAT) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_SW1_WAKE_EN, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1, ACP70_SDW1_PME_STAT);
		adata->acp70_sdw1_wake_event = true;
		sdw_wake_irq = true;
	}

	if (sdw_wake_irq) {
		amd_sof_handle_acp70_sdw_wake_event(adata);
		irq_flag = 1;
	}
	return irq_flag;
}
454 
acp_irq_thread(int irq,void * context)455 static irqreturn_t acp_irq_thread(int irq, void *context)
456 {
457 	struct snd_sof_dev *sdev = context;
458 	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
459 	unsigned int count = ACP_HW_SEM_RETRY_COUNT;
460 
461 	spin_lock_irq(&sdev->ipc_lock);
462 	/* Wait until acquired HW Semaphore lock or timeout */
463 	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset) && --count)
464 		;
465 	spin_unlock_irq(&sdev->ipc_lock);
466 
467 	if (!count) {
468 		dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
469 		return IRQ_NONE;
470 	}
471 
472 	sof_ops(sdev)->irq_thread(irq, sdev);
473 	/* Unlock or Release HW Semaphore */
474 	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
475 
476 	return IRQ_HANDLED;
477 };
478 
/*
 * acp_irq_handler - hard IRQ handler for the shared ACP interrupt
 * @irq: interrupt number
 * @dev_id: SOF device
 *
 * Dispatches DSP-to-host IPC to the threaded handler and services
 * SoundWire manager, error and (on ACP 7.x) wake interrupts inline.
 *
 * Return: IRQ_WAKE_THREAD for IPC, IRQ_HANDLED for serviced sources,
 * IRQ_NONE when this device raised nothing (IRQ line is shared).
 */
static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct amd_sdw_manager *amd_manager;
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;
	int irq_flag = 0, wake_irq_flag = 0;

	/* DSP-to-host IPC: ack the status bit and defer to acp_irq_thread() */
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val & ACP_DSP_TO_HOST_IRQ) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
				  ACP_DSP_TO_HOST_IRQ);
		return IRQ_WAKE_THREAD;
	}

	/* SoundWire manager 0 interrupt: hand off to its work item */
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SDW0_IRQ_MASK) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_SDW0_IRQ_MASK);
		if (amd_manager)
			schedule_work(&amd_manager->amd_sdw_irq_thread);
		irq_flag = 1;
	}

	/* ACP error interrupt: clear the error-reason and status registers */
	if (val & ACP_ERROR_IRQ_MASK) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
		/* ACP_SW1_I2S_ERROR_REASON is newly added register from rmb platform onwards */
		if (adata->pci_rev >= ACP_RMB_PCI_ID)
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
		irq_flag = 1;
	}

	/* Second status register exists only on parts that define it */
	if (desc->ext_intr_stat1) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
		if (val & ACP_SDW1_IRQ_MASK) {
			amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
					  ACP_SDW1_IRQ_MASK);
			if (amd_manager)
				schedule_work(&amd_manager->amd_sdw_irq_thread);
			irq_flag = 1;
		}
		/* SoundWire wake events are only wired up on ACP 7.x */
		switch (adata->pci_rev) {
		case ACP70_PCI_ID:
		case ACP71_PCI_ID:
		case ACP72_PCI_ID:
			wake_irq_flag = amd_sof_check_and_handle_acp70_sdw_wake_irq(sdev);
			break;
		}
	}
	if (irq_flag || wake_irq_flag)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
538 
/*
 * acp_power_on - power up the ACP block via the PGFSM
 * @sdev: SOF device
 *
 * No-op when the block already reports ACP_POWERED_ON. Otherwise kicks
 * the power-gating state machine and waits for the status to clear.
 *
 * Return: 0 on success, -EINVAL for an unknown PCI revision, negative
 * error code on poll timeout.
 */
static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	/* PGFSM mask layout differs per ACP generation */
	switch (adata->pci_rev) {
	case ACP_RN_PCI_ID:
	case ACP_VANGOGH_PCI_ID:
		acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	case ACP_RMB_PCI_ID:
	case ACP63_PCI_ID:
		acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		acp_pgfsm_status_mask = ACP70_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP70_PGFSM_CNTL_POWER_ON_MASK;
		break;
	default:
		return -EINVAL;
	}

	/* Request power-on only if the status still shows the block gated */
	if (val & acp_pgfsm_status_mask)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  acp_pgfsm_cntl_mask);

	/* Status reads zero once the power-up sequence completes */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}
585 
/*
 * acp_reset - assert and release the full ACP soft reset
 * @sdev: SOF device
 *
 * Resets the whole ACP IP (all ACP registers return to defaults).
 *
 * Return: 0 on success, negative error code on poll timeout.
 */
static int acp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	/* Wait for the reset-done acknowledgment bits */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	/* Register reads back as zero once the release completes */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}
610 
/*
 * acp_dsp_reset - assert and release the DSP-only soft reset
 * @sdev: SOF device
 *
 * Unlike acp_reset(), this resets only the DSP core, leaving the rest
 * of the ACP register state (e.g. SoundWire config) intact.
 *
 * Return: 0 on success, negative error code on poll timeout.
 */
static int acp_dsp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_ASSERT_RESET);

	/* Wait for the DSP reset-done acknowledgment bits */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_DSP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_RELEASE_RESET);

	/* Register reads back as zero once the release completes */
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}
635 
/*
 * acp_init - bring the ACP block to a known running state
 * @sdev: SOF device
 *
 * Powers on the IP, takes it through a full soft reset, selects the
 * ACLK clock mux, enables external interrupts and, on ACP 7.x, arms
 * SoundWire wake/PME handling.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int acp_init(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *acp_data;
	unsigned int sdw0_wake_en, sdw1_wake_en;
	int ret;

	/* power on */
	acp_data = sdev->pdata->hw_pdata;
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	ret = acp_reset(sdev);
	if (ret)
		return ret;

	if (desc->acp_clkmux_sel)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	if (desc->ext_intr_enb)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);

	if (desc->ext_intr_cntl)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_cntl, ACP_ERROR_IRQ_MASK);

	/* ACP 7.x only: re-arm SoundWire host-wake interrupts and PME */
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		sdw0_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW0_WAKE_EN);
		sdw1_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW1_WAKE_EN);
		if (sdw0_wake_en || sdw1_wake_en)
			snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, ACP70_EXTERNAL_INTR_CNTL1,
						ACP70_SDW_HOST_WAKE_MASK, ACP70_SDW_HOST_WAKE_MASK);

		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_PME_EN, 1);
		break;
	}
	return 0;
}
681 
check_acp_sdw_enable_status(struct snd_sof_dev * sdev)682 static bool check_acp_sdw_enable_status(struct snd_sof_dev *sdev)
683 {
684 	struct acp_dev_data *acp_data;
685 	u32 sdw0_en, sdw1_en;
686 
687 	acp_data = sdev->pdata->hw_pdata;
688 	if (!acp_data->sdw)
689 		return false;
690 
691 	sdw0_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW0_EN);
692 	sdw1_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW1_EN);
693 	acp_data->sdw_en_stat = sdw0_en || sdw1_en;
694 	return acp_data->sdw_en_stat;
695 }
696 
/*
 * amd_sof_acp_suspend - put the ACP into its suspend state
 * @sdev: SOF device
 * @target_state: target power state (currently unused here)
 *
 * Return: 0 on success, negative error code on failure.
 */
int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct acp_dev_data *acp_data;
	int ret;
	bool enable = false;

	acp_data = sdev->pdata->hw_pdata;
	/* When acp_reset() function is invoked, it will apply ACP SOFT reset and
	 * DSP reset. ACP Soft reset sequence will cause all ACP IP registers will
	 * be reset to default values which will break the ClockStop Mode functionality.
	 * Add a condition check to apply DSP reset when SoundWire ClockStop mode
	 * is selected. For the rest of the scenarios, apply acp reset sequence.
	 */
	if (check_acp_sdw_enable_status(sdev))
		return acp_dsp_reset(sdev);

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}
	/* ACP 7.x keeps ACP_CONTROL enabled across suspend; older parts clear it */
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		enable = true;
		break;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, enable);

	return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, "SND_SOC_SOF_AMD_COMMON");
730 
/*
 * amd_sof_acp_resume - restore the ACP after suspend
 * @sdev: SOF device
 *
 * If SoundWire was disabled at suspend time the full init + memory setup
 * path is rerun; otherwise only the DSP is reset (preserving SoundWire
 * register state) and, on ACP 7.x, PME is re-enabled.
 *
 * Return: 0 on success, negative error code on failure.
 */
int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	int ret;
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw_en_stat) {
		ret = acp_init(sdev);
		if (ret) {
			dev_err(sdev->dev, "ACP Init failed\n");
			return ret;
		}
		return acp_memory_init(sdev);
	}
	/* SoundWire stayed enabled: keep its state, just reset the DSP core */
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
	case ACP72_PCI_ID:
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_PME_EN, 1);
		break;
	}

	return acp_dsp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, "SND_SOC_SOF_AMD_COMMON");
756 
757 #if IS_ENABLED(CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE)
/*
 * acp_sof_scan_sdw_devices - discover SoundWire controllers via ACPI
 * @sdev: SOF device
 * @addr: ACPI address of the SoundWire node (0 means not present)
 *
 * Return: the scan result from amd_sdw_scan_controller(), or -ENODEV
 * when no SoundWire ACPI node exists.
 */
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *acp_data = sdev->pdata->hw_pdata;
	struct acpi_device *sdw_dev;

	/* A zero ACPI address means the platform exposes no SoundWire node */
	if (!addr)
		return -ENODEV;

	sdw_dev = acpi_find_child_device(ACPI_COMPANION(sdev->dev), addr, 0);
	if (!sdw_dev)
		return -ENODEV;

	/* Record the ACPI handle and link budget, then enumerate managers */
	acp_data->info.handle = sdw_dev->handle;
	acp_data->info.count = desc->sdw_max_link_count;

	return amd_sdw_scan_controller(&acp_data->info);
}
777 
amd_sof_sdw_probe(struct snd_sof_dev * sdev)778 static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
779 {
780 	struct acp_dev_data *acp_data;
781 	struct sdw_amd_res sdw_res;
782 	int ret;
783 
784 	acp_data = sdev->pdata->hw_pdata;
785 
786 	memset(&sdw_res, 0, sizeof(sdw_res));
787 	sdw_res.addr = acp_data->addr;
788 	sdw_res.reg_range = acp_data->reg_range;
789 	sdw_res.handle = acp_data->info.handle;
790 	sdw_res.parent = sdev->dev;
791 	sdw_res.dev = sdev->dev;
792 	sdw_res.acp_lock = &acp_data->acp_lock;
793 	sdw_res.count = acp_data->info.count;
794 	sdw_res.link_mask = acp_data->info.link_mask;
795 	sdw_res.mmio_base = sdev->bar[ACP_DSP_BAR];
796 	sdw_res.acp_rev = acp_data->pci_rev;
797 
798 	ret = sdw_amd_probe(&sdw_res, &acp_data->sdw);
799 	if (ret)
800 		dev_err(sdev->dev, "SoundWire probe failed\n");
801 	return ret;
802 }
803 
amd_sof_sdw_exit(struct snd_sof_dev * sdev)804 static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
805 {
806 	struct acp_dev_data *acp_data;
807 
808 	acp_data = sdev->pdata->hw_pdata;
809 	if (acp_data->sdw)
810 		sdw_amd_exit(acp_data->sdw);
811 	acp_data->sdw = NULL;
812 
813 	return 0;
814 }
815 
816 #else
/* Stubs used when CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE is disabled */
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	return 0;
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	return 0;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	return 0;
}
831 #endif
832 
amd_sof_acp_probe(struct snd_sof_dev * sdev)833 int amd_sof_acp_probe(struct snd_sof_dev *sdev)
834 {
835 	struct pci_dev *pci = to_pci_dev(sdev->dev);
836 	struct acp_dev_data *adata;
837 	const struct sof_amd_acp_desc *chip;
838 	const struct dmi_system_id *dmi_id;
839 	unsigned int addr;
840 	int ret;
841 
842 	chip = get_chip_info(sdev->pdata);
843 	if (!chip) {
844 		dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
845 		return -EIO;
846 	}
847 	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
848 			     GFP_KERNEL);
849 	if (!adata)
850 		return -ENOMEM;
851 
852 	adata->dev = sdev;
853 	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
854 							PLATFORM_DEVID_NONE, NULL, 0);
855 	if (IS_ERR(adata->dmic_dev)) {
856 		dev_err(sdev->dev, "failed to register platform for dmic codec\n");
857 		return PTR_ERR(adata->dmic_dev);
858 	}
859 	addr = pci_resource_start(pci, ACP_DSP_BAR);
860 	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
861 	if (!sdev->bar[ACP_DSP_BAR]) {
862 		dev_err(sdev->dev, "ioremap error\n");
863 		ret = -ENXIO;
864 		goto unregister_dev;
865 	}
866 
867 	pci_set_master(pci);
868 	adata->addr = addr;
869 	adata->reg_range = chip->reg_end_addr - chip->reg_start_addr;
870 	adata->pci_rev = pci->revision;
871 	mutex_init(&adata->acp_lock);
872 	sdev->pdata->hw_pdata = adata;
873 
874 	ret = acp_init(sdev);
875 	if (ret < 0)
876 		goto unregister_dev;
877 
878 	sdev->ipc_irq = pci->irq;
879 	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
880 				   IRQF_SHARED, "AudioDSP", sdev);
881 	if (ret < 0) {
882 		dev_err(sdev->dev, "failed to register IRQ %d\n",
883 			sdev->ipc_irq);
884 		goto unregister_dev;
885 	}
886 
887 	/* scan SoundWire capabilities exposed by DSDT */
888 	ret = acp_sof_scan_sdw_devices(sdev, chip->sdw_acpi_dev_addr);
889 	if (ret < 0) {
890 		dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
891 		goto skip_soundwire;
892 	}
893 	ret = amd_sof_sdw_probe(sdev);
894 	if (ret < 0) {
895 		dev_err(sdev->dev, "error: SoundWire probe error\n");
896 		free_irq(sdev->ipc_irq, sdev);
897 		return ret;
898 	}
899 
900 skip_soundwire:
901 	sdev->dsp_box.offset = 0;
902 	sdev->dsp_box.size = BOX_SIZE_512;
903 
904 	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
905 	sdev->host_box.size = BOX_SIZE_512;
906 
907 	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
908 	sdev->debug_box.size = BOX_SIZE_1024;
909 
910 	dmi_id = dmi_first_match(acp_sof_quirk_table);
911 	if (dmi_id) {
912 		adata->quirks = dmi_id->driver_data;
913 
914 		if (adata->quirks->signed_fw_image) {
915 			adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
916 							    "sof-%s-code.bin",
917 							    chip->name);
918 			if (!adata->fw_code_bin) {
919 				ret = -ENOMEM;
920 				goto free_ipc_irq;
921 			}
922 
923 			adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
924 							    "sof-%s-data.bin",
925 							    chip->name);
926 			if (!adata->fw_data_bin) {
927 				ret = -ENOMEM;
928 				goto free_ipc_irq;
929 			}
930 		}
931 	}
932 
933 	adata->enable_fw_debug = enable_fw_debug;
934 	acp_memory_init(sdev);
935 
936 	acp_dsp_stream_init(sdev);
937 
938 	return 0;
939 
940 free_ipc_irq:
941 	free_irq(sdev->ipc_irq, sdev);
942 unregister_dev:
943 	platform_device_unregister(adata->dmic_dev);
944 	return ret;
945 }
946 EXPORT_SYMBOL_NS(amd_sof_acp_probe, "SND_SOC_SOF_AMD_COMMON");
947 
/*
 * amd_sof_acp_remove - undo amd_sof_acp_probe()
 * @sdev: SOF device
 *
 * Tears down SoundWire, frees the IRQ, unregisters the dmic codec
 * device and leaves the ACP in reset. BAR mapping and adata are
 * devm-managed and released automatically.
 */
void amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->sdw)
		amd_sof_sdw_exit(sdev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, "SND_SOC_SOF_AMD_COMMON");
964 
965 MODULE_LICENSE("Dual BSD/GPL");
966 MODULE_DESCRIPTION("AMD ACP sof driver");
967 MODULE_IMPORT_NS("SOUNDWIRE_AMD_INIT");
968 MODULE_IMPORT_NS("SND_AMD_SOUNDWIRE_ACPI");
969