// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/amd_node.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");

static struct acp_quirk_entry quirk_valve_galileo = {
	.signed_fw_image = true,
	.skip_iram_dram_size_mod = true,
	.post_fw_run_delay = true,
};

const struct dmi_system_id acp_sof_quirk_table[] = {
	{
		/* Steam Deck OLED device */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
		},
		.driver_data = &quirk_valve_galileo,
	},
	{}
};
EXPORT_SYMBOL_GPL(acp_sof_quirk_table);

static void init_dma_descriptor(struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *acp_data = sdev->pdata->hw_pdata;
	unsigned int addr;
	unsigned int acp_dma_desc_base_addr, acp_dma_desc_max_num_dscr;

	addr = desc->sram_pte_offset + sdev->debug_box.offset +
	       offsetof(struct scratch_reg_conf, dma_desc);

	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		acp_dma_desc_base_addr = ACP70_DMA_DESC_BASE_ADDR;
		acp_dma_desc_max_num_dscr = ACP70_DMA_DESC_MAX_NUM_DSCR;
		break;
	default:
		acp_dma_desc_base_addr = ACP_DMA_DESC_BASE_ADDR;
		acp_dma_desc_max_num_dscr = ACP_DMA_DESC_MAX_NUM_DSCR;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_desc_base_addr, addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_desc_max_num_dscr, ACP_MAX_DESC_CNT);
}

static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
				     struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int offset;

	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
		 offsetof(struct scratch_reg_conf, dma_desc) +
		 idx * sizeof(struct dma_descriptor);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	struct acp_dev_data *acp_data = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, status;
	unsigned int acp_dma_cntl_0, acp_dma_ch_rst_sts, acp_dma_dscr_err_sts_0;
	unsigned int acp_dma_dscr_cnt_0, acp_dma_prio_0, acp_dma_dscr_strt_idx_0;
	int ret;

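	/*
	 * Select the DMA register set that matches this device's PCI revision;
	 * ACP 7.0/7.1 use a different set of DMA register offsets.
	 */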
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		acp_dma_cntl_0 = ACP70_DMA_CNTL_0;
		acp_dma_ch_rst_sts = ACP70_DMA_CH_RST_STS;
		acp_dma_dscr_err_sts_0 = ACP70_DMA_ERR_STS_0;
		acp_dma_dscr_cnt_0 = ACP70_DMA_DSCR_CNT_0;
		acp_dma_prio_0 = ACP70_DMA_PRIO_0;
		acp_dma_dscr_strt_idx_0 = ACP70_DMA_DSCR_STRT_IDX_0;
		break;
	default:
		acp_dma_cntl_0 = ACP_DMA_CNTL_0;
		acp_dma_ch_rst_sts = ACP_DMA_CH_RST_STS;
		acp_dma_dscr_err_sts_0 = ACP_DMA_ERR_STS_0;
		acp_dma_dscr_cnt_0 = ACP_DMA_DSCR_CNT_0;
		acp_dma_prio_0 = ACP_DMA_PRIO_0;
		acp_dma_dscr_strt_idx_0 = ACP_DMA_DSCR_STRT_IDX_0;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_cntl_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, acp_dma_ch_rst_sts, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, acp_dma_dscr_err_sts_0 +
				       ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (acp_dma_cntl_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_dscr_cnt_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_dscr_strt_idx_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_prio_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, acp_dma_cntl_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}

static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u16 dscr;

	if (!dscr_info || !dscr_count)
		return -EINVAL;

	for (dscr = 0; dscr < dscr_count; dscr++)
		configure_dma_descriptor(adata, dscr, dscr_info++);

	ret = config_dma_channel(adata, ch, 0, dscr_count);
	if (ret < 0)
		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

	return ret;
}

int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}

/*
 * psp_mbox_ready - function to poll ready bit of psp mbox
 * @adata: acp device data
 * @ack: true when waiting for the PSP acknowledgement of a command,
 *	 false when checking that the mailbox is free for a new command
 */
static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret, data;

	ret = read_poll_timeout(smn_read_register, data, data > 0 && data & MBOX_READY_MASK,
				MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false, MP0_C2PMSG_114_REG);

	if (!ret)
		return 0;

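	/*
	 * The ready bit never came up: log the PSP status field and report a
	 * timeout when waiting for the command acknowledgement; otherwise the
	 * mailbox is still busy with a previous command.
	 */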
dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK); 207 208 if (ack) 209 return -ETIMEDOUT; 210 211 return -EBUSY; 212 } 213 214 /* 215 * psp_send_cmd - function to send psp command over mbox 216 * @adata: acp device data 217 * @cmd: non zero integer value for command type 218 */ 219 220 static int psp_send_cmd(struct acp_dev_data *adata, int cmd) 221 { 222 struct snd_sof_dev *sdev = adata->dev; 223 int ret; 224 u32 data; 225 226 if (!cmd) 227 return -EINVAL; 228 229 /* Get a non-zero Doorbell value from PSP */ 230 ret = read_poll_timeout(smn_read_register, data, data > 0, MBOX_DELAY_US, 231 ACP_PSP_TIMEOUT_US, false, MP0_C2PMSG_73_REG); 232 233 if (ret) { 234 dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG); 235 return ret; 236 } 237 238 /* Check if PSP is ready for new command */ 239 ret = psp_mbox_ready(adata, 0); 240 if (ret) 241 return ret; 242 243 ret = amd_smn_write(0, MP0_C2PMSG_114_REG, cmd); 244 if (ret) 245 return ret; 246 247 /* Ring the Doorbell for PSP */ 248 ret = amd_smn_write(0, MP0_C2PMSG_73_REG, data); 249 if (ret) 250 return ret; 251 252 /* Check MBOX ready as PSP ack */ 253 ret = psp_mbox_ready(adata, 1); 254 255 return ret; 256 } 257 258 int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr, 259 unsigned int start_addr, unsigned int dest_addr, 260 unsigned int image_length) 261 { 262 struct snd_sof_dev *sdev = adata->dev; 263 unsigned int tx_count, fw_qualifier, val; 264 int ret; 265 266 if (!image_addr) { 267 dev_err(sdev->dev, "SHA DMA image address is NULL\n"); 268 return -EINVAL; 269 } 270 271 val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD); 272 if (val & ACP_SHA_RUN) { 273 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET); 274 ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS, 275 val, val & ACP_SHA_RESET, 276 ACP_REG_POLL_INTERVAL, 277 ACP_REG_POLL_TIMEOUT_US); 278 if (ret < 0) { 279 dev_err(sdev->dev, "SHA DMA Failed to Reset\n"); 280 return ret; 281 } 282 } 283 284 if (adata->quirks && adata->quirks->signed_fw_image) 285 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER); 286 287 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr); 288 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr); 289 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length); 290 291 /* psp_send_cmd only required for vangogh platform */ 292 if (adata->pci_rev == ACP_VANGOGH_PCI_ID && 293 !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) { 294 /* Modify IRAM and DRAM size */ 295 ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2); 296 if (ret) 297 return ret; 298 ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG); 299 if (ret) 300 return ret; 301 } 302 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN); 303 304 ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT, 305 tx_count, tx_count == image_length, 306 ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US); 307 if (ret < 0) { 308 dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count); 309 return ret; 310 } 311 312 /* psp_send_cmd only required for renoir platform*/ 313 if (adata->pci_rev == ACP_RN_PCI_ID) { 314 ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND); 315 if (ret) 316 return ret; 317 } 318 319 ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER, 320 fw_qualifier, 
	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_PSP_ACK);
		dev_err(sdev->dev, "PSP validation failed: fw_qualifier = %#x, ACP_SHA_PSP_ACK = %#x\n",
			fw_qualifier, val);
		return ret;
	}

	return 0;
}

int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	unsigned int acp_dma_ch_sts;
	int ret = 0;

	switch (adata->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		acp_dma_ch_sts = ACP70_DMA_CH_STS;
		break;
	default:
		acp_dma_ch_sts = ACP_DMA_CH_STS;
	}
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, acp_dma_ch_sts, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}

void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

static int acp_memory_init(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
	init_dma_descriptor(adata);

	return 0;
}

static irqreturn_t acp_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int count = ACP_HW_SEM_RETRY_COUNT;

	spin_lock_irq(&sdev->ipc_lock);
	/* Wait until the HW semaphore lock is acquired or the retry count expires */
	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset) && --count)
		;
	spin_unlock_irq(&sdev->ipc_lock);

	if (!count) {
		dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
		return IRQ_NONE;
	}

	sof_ops(sdev)->irq_thread(irq, sdev);
	/* Unlock or release the HW semaphore */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);

	return IRQ_HANDLED;
}

static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct amd_sdw_manager *amd_manager;
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;
	int irq_flag = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val & ACP_DSP_TO_HOST_IRQ) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
				  ACP_DSP_TO_HOST_IRQ);
		return IRQ_WAKE_THREAD;
	}

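	/*
	 * No DSP-to-host IPC interrupt is pending: check the external interrupt
	 * status registers for SoundWire manager and ACP error interrupts.
	 */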
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SDW0_IRQ_MASK) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_SDW0_IRQ_MASK);
		if (amd_manager)
			schedule_work(&amd_manager->amd_sdw_irq_thread);
		irq_flag = 1;
	}

	if (val & ACP_ERROR_IRQ_MASK) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
		/* ACP_SW1_I2S_ERROR_REASON is a new register on RMB and later platforms */
		if (adata->pci_rev >= ACP_RMB_PCI_ID)
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
		irq_flag = 1;
	}

	if (desc->ext_intr_stat1) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
		if (val & ACP_SDW1_IRQ_MASK) {
			amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
					  ACP_SDW1_IRQ_MASK);
			if (amd_manager)
				schedule_work(&amd_manager->amd_sdw_irq_thread);
			irq_flag = 1;
		}
	}
	if (irq_flag)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	switch (adata->pci_rev) {
	case ACP_RN_PCI_ID:
	case ACP_VANGOGH_PCI_ID:
		acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	case ACP_RMB_PCI_ID:
	case ACP63_PCI_ID:
		acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		acp_pgfsm_status_mask = ACP70_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP70_PGFSM_CNTL_POWER_ON_MASK;
		break;
	default:
		return -EINVAL;
	}

	if (val & acp_pgfsm_status_mask)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  acp_pgfsm_cntl_mask);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}

static int acp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}

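/*
 * Unlike acp_reset(), which resets the whole ACP IP, acp_dsp_reset() toggles
 * only the DSP soft reset bits. The suspend path uses it when a SoundWire
 * manager is enabled so that ACP register state (ClockStop mode) is preserved.
 */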
static int acp_dsp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_DSP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}

static int acp_init(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *acp_data;
	unsigned int sdw0_wake_en, sdw1_wake_en;
	int ret;

	/* power on */
	acp_data = sdev->pdata->hw_pdata;
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	ret = acp_reset(sdev);
	if (ret)
		return ret;

	if (desc->acp_clkmux_sel)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	if (desc->ext_intr_enb)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);

	if (desc->ext_intr_cntl)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_cntl, ACP_ERROR_IRQ_MASK);

	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		sdw0_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW0_WAKE_EN);
		sdw1_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW1_WAKE_EN);
		if (sdw0_wake_en || sdw1_wake_en)
			snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, ACP70_EXTERNAL_INTR_CNTL1,
						ACP70_SDW_HOST_WAKE_MASK, ACP70_SDW_HOST_WAKE_MASK);

		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_PME_EN, 1);
		break;
	}
	return 0;
}

static bool check_acp_sdw_enable_status(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	u32 sdw0_en, sdw1_en;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw)
		return false;

	sdw0_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW0_EN);
	sdw1_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW1_EN);
	acp_data->sdw_en_stat = sdw0_en || sdw1_en;
	return acp_data->sdw_en_stat;
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct acp_dev_data *acp_data;
	int ret;
	bool enable = false;

	acp_data = sdev->pdata->hw_pdata;
	/*
	 * acp_reset() applies both an ACP soft reset and a DSP reset. The ACP
	 * soft reset sequence resets all ACP IP registers to their default
	 * values, which breaks SoundWire ClockStop mode. Apply only a DSP
	 * reset when a SoundWire manager is enabled (ClockStop mode selected);
	 * for all other scenarios apply the full ACP reset sequence.
	 */
	if (check_acp_sdw_enable_status(sdev))
		return acp_dsp_reset(sdev);

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		enable = true;
		break;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, enable);

	return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, "SND_SOC_SOF_AMD_COMMON");

int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	int ret;
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw_en_stat) {
		ret = acp_init(sdev);
		if (ret) {
			dev_err(sdev->dev, "ACP Init failed\n");
			return ret;
		}
		return acp_memory_init(sdev);
	}
	switch (acp_data->pci_rev) {
	case ACP70_PCI_ID:
	case ACP71_PCI_ID:
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_PME_EN, 1);
		break;
	}

	return acp_dsp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, "SND_SOC_SOF_AMD_COMMON");

#if IS_ENABLED(CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE)
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	struct acpi_device *sdw_dev;
	struct acp_dev_data *acp_data;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	if (!addr)
		return -ENODEV;

	acp_data = sdev->pdata->hw_pdata;
	sdw_dev = acpi_find_child_device(ACPI_COMPANION(sdev->dev), addr, 0);
	if (!sdw_dev)
		return -ENODEV;

	acp_data->info.handle = sdw_dev->handle;
	acp_data->info.count = desc->sdw_max_link_count;

	return amd_sdw_scan_controller(&acp_data->info);
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	struct sdw_amd_res sdw_res;
	int ret;

	acp_data = sdev->pdata->hw_pdata;

	memset(&sdw_res, 0, sizeof(sdw_res));
	sdw_res.addr = acp_data->addr;
	sdw_res.reg_range = acp_data->reg_range;
	sdw_res.handle = acp_data->info.handle;
	sdw_res.parent = sdev->dev;
	sdw_res.dev = sdev->dev;
	sdw_res.acp_lock = &acp_data->acp_lock;
	sdw_res.count = acp_data->info.count;
	sdw_res.link_mask = acp_data->info.link_mask;
	sdw_res.mmio_base = sdev->bar[ACP_DSP_BAR];
	sdw_res.acp_rev = acp_data->pci_rev;

	ret = sdw_amd_probe(&sdw_res, &acp_data->sdw);
	if (ret)
		dev_err(sdev->dev, "SoundWire probe failed\n");
	return ret;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (acp_data->sdw)
		sdw_amd_exit(acp_data->sdw);
	acp_data->sdw = NULL;

	return 0;
}

#else
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	return 0;
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	return 0;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	return 0;
}
#endif

int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	const struct sof_amd_acp_desc *chip;
	const struct dmi_system_id *dmi_id;
	unsigned int addr;
	int ret;

	chip = get_chip_info(sdev->pdata);
	if (!chip) {
		dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
		return -EIO;
	}
	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
			     GFP_KERNEL);
	if (!adata)
		return -ENOMEM;

	adata->dev = sdev;
	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
							PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(adata->dmic_dev)) {
		dev_err(sdev->dev, "failed to register platform for dmic codec\n");
		return PTR_ERR(adata->dmic_dev);
	}
	addr = pci_resource_start(pci, ACP_DSP_BAR);
	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
	if (!sdev->bar[ACP_DSP_BAR]) {
		dev_err(sdev->dev, "ioremap error\n");
		ret = -ENXIO;
		goto unregister_dev;
	}

	pci_set_master(pci);
	adata->addr = addr;
	adata->reg_range = chip->reg_end_addr - chip->reg_start_addr;
	adata->pci_rev = pci->revision;
	mutex_init(&adata->acp_lock);
	sdev->pdata->hw_pdata = adata;

	ret = acp_init(sdev);
	if (ret < 0)
		goto unregister_dev;

	sdev->ipc_irq = pci->irq;
	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
				   IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to register IRQ %d\n",
			sdev->ipc_irq);
		goto unregister_dev;
	}

	/* scan SoundWire capabilities exposed by DSDT */
	ret = acp_sof_scan_sdw_devices(sdev, chip->sdw_acpi_dev_addr);
	if (ret < 0) {
		dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
		goto skip_soundwire;
	}
	ret = amd_sof_sdw_probe(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "error: SoundWire probe error\n");
		free_irq(sdev->ipc_irq, sdev);
		return ret;
	}

skip_soundwire:
	sdev->dsp_box.offset = 0;
	sdev->dsp_box.size = BOX_SIZE_512;

	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
	sdev->host_box.size = BOX_SIZE_512;

	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
	sdev->debug_box.size = BOX_SIZE_1024;

	dmi_id = dmi_first_match(acp_sof_quirk_table);
	if (dmi_id) {
		adata->quirks = dmi_id->driver_data;

		if (adata->quirks->signed_fw_image) {
			adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-code.bin",
							    chip->name);
			if (!adata->fw_code_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}

			adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-data.bin",
							    chip->name);
			if (!adata->fw_data_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}
		}
	}

	adata->enable_fw_debug = enable_fw_debug;
	acp_memory_init(sdev);

	acp_dsp_stream_init(sdev);

	return 0;

free_ipc_irq:
	free_irq(sdev->ipc_irq, sdev);
unregister_dev:
	platform_device_unregister(adata->dmic_dev);
	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, "SND_SOC_SOF_AMD_COMMON");

void amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->sdw)
		amd_sof_sdw_exit(sdev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, "SND_SOC_SOF_AMD_COMMON");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_IMPORT_NS("SOUNDWIRE_AMD_INIT");
MODULE_IMPORT_NS("SND_AMD_SOUNDWIRE_ACPI");