1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/devcoredump.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/module.h> 18 #include <linux/of.h> 19 #include <linux/of_reserved_mem.h> 20 #include <linux/of_platform.h> 21 #include <linux/platform_device.h> 22 #include <linux/pm_domain.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/regmap.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/remoteproc.h> 27 #include <linux/reset.h> 28 #include <linux/soc/qcom/mdt_loader.h> 29 #include <linux/iopoll.h> 30 #include <linux/slab.h> 31 32 #include "remoteproc_internal.h" 33 #include "qcom_common.h" 34 #include "qcom_pil_info.h" 35 #include "qcom_q6v5.h" 36 37 #include <linux/firmware/qcom/qcom_scm.h> 38 39 #define MPSS_CRASH_REASON_SMEM 421 40 41 #define MBA_LOG_SIZE SZ_4K 42 43 #define MPSS_PAS_ID 5 44 45 /* RMB Status Register Values */ 46 #define RMB_PBL_SUCCESS 0x1 47 48 #define RMB_MBA_XPU_UNLOCKED 0x1 49 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 50 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 51 #define RMB_MBA_AUTH_COMPLETE 0x4 52 53 /* PBL/MBA interface registers */ 54 #define RMB_MBA_IMAGE_REG 0x00 55 #define RMB_PBL_STATUS_REG 0x04 56 #define RMB_MBA_COMMAND_REG 0x08 57 #define RMB_MBA_STATUS_REG 0x0C 58 #define RMB_PMI_META_DATA_REG 0x10 59 #define RMB_PMI_CODE_START_REG 0x14 60 #define RMB_PMI_CODE_LENGTH_REG 0x18 61 #define RMB_MBA_MSS_STATUS 0x40 62 #define RMB_MBA_ALT_RESET 0x44 63 64 #define RMB_CMD_META_DATA_READY 0x1 65 #define RMB_CMD_LOAD_READY 0x2 66 67 /* QDSP6SS Register Offsets */ 68 #define QDSP6SS_RESET_REG 0x014 69 #define 
QDSP6SS_GFMUX_CTL_REG 0x020 70 #define QDSP6SS_PWR_CTL_REG 0x030 71 #define QDSP6SS_MEM_PWR_CTL 0x0B0 72 #define QDSP6V6SS_MEM_PWR_CTL 0x034 73 #define QDSP6SS_STRAP_ACC 0x110 74 #define QDSP6V62SS_BHS_STATUS 0x0C4 75 76 /* AXI Halt Register Offsets */ 77 #define AXI_HALTREQ_REG 0x0 78 #define AXI_HALTACK_REG 0x4 79 #define AXI_IDLE_REG 0x8 80 #define AXI_GATING_VALID_OVERRIDE BIT(0) 81 82 #define HALT_ACK_TIMEOUT_US 100000 83 84 /* QACCEPT Register Offsets */ 85 #define QACCEPT_ACCEPT_REG 0x0 86 #define QACCEPT_ACTIVE_REG 0x4 87 #define QACCEPT_DENY_REG 0x8 88 #define QACCEPT_REQ_REG 0xC 89 90 #define QACCEPT_TIMEOUT_US 50 91 92 /* QDSP6SS_RESET */ 93 #define Q6SS_STOP_CORE BIT(0) 94 #define Q6SS_CORE_ARES BIT(1) 95 #define Q6SS_BUS_ARES_ENABLE BIT(2) 96 97 /* QDSP6SS CBCR */ 98 #define Q6SS_CBCR_CLKEN BIT(0) 99 #define Q6SS_CBCR_CLKOFF BIT(31) 100 #define Q6SS_CBCR_TIMEOUT_US 200 101 102 /* QDSP6SS_GFMUX_CTL */ 103 #define Q6SS_CLK_ENABLE BIT(1) 104 105 /* QDSP6SS_PWR_CTL */ 106 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 107 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 108 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 109 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 110 #define Q6SS_ETB_SLP_NRET_N BIT(17) 111 #define Q6SS_L2DATA_STBY_N BIT(18) 112 #define Q6SS_SLP_RET_N BIT(19) 113 #define Q6SS_CLAMP_IO BIT(20) 114 #define QDSS_BHS_ON BIT(21) 115 #define QDSS_LDO_BYP BIT(22) 116 117 /* QDSP6v55 parameters */ 118 #define QDSP6V55_MEM_BITS GENMASK(16, 8) 119 120 /* QDSP6v56 parameters */ 121 #define QDSP6v56_LDO_BYP BIT(25) 122 #define QDSP6v56_BHS_ON BIT(24) 123 #define QDSP6v56_CLAMP_WL BIT(21) 124 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 125 #define QDSP6SS_XO_CBCR 0x0038 126 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 127 #define QDSP6v55_BHS_EN_REST_ACK BIT(0) 128 129 /* QDSP6v65 parameters */ 130 #define QDSP6SS_CORE_CBCR 0x20 131 #define QDSP6SS_SLEEP 0x3C 132 #define QDSP6SS_BOOT_CORE_START 0x400 133 #define QDSP6SS_BOOT_CMD 0x404 134 #define BOOT_FSM_TIMEOUT 10000 135 #define 
BHS_CHECK_MAX_LOOPS 200 136 137 /* External power block headswitch */ 138 #define EXTERNAL_BHS_ON BIT(0) 139 #define EXTERNAL_BHS_STATUS BIT(4) 140 #define EXTERNAL_BHS_TIMEOUT_US 50 141 142 struct reg_info { 143 struct regulator *reg; 144 int uV; 145 int uA; 146 }; 147 148 struct qcom_mss_reg_res { 149 const char *supply; 150 int uV; 151 int uA; 152 }; 153 154 struct rproc_hexagon_res { 155 const char *hexagon_mba_image; 156 struct qcom_mss_reg_res *proxy_supply; 157 struct qcom_mss_reg_res *fallback_proxy_supply; 158 struct qcom_mss_reg_res *active_supply; 159 char **proxy_clk_names; 160 char **reset_clk_names; 161 char **active_clk_names; 162 char **proxy_pd_names; 163 int version; 164 bool need_mem_protection; 165 bool has_alt_reset; 166 bool has_mba_logs; 167 bool has_spare_reg; 168 bool has_qaccept_regs; 169 bool has_ext_bhs_reg; 170 bool has_ext_cntl_regs; 171 bool has_vq6; 172 }; 173 174 struct q6v5 { 175 struct device *dev; 176 struct rproc *rproc; 177 178 void __iomem *reg_base; 179 void __iomem *rmb_base; 180 181 struct regmap *halt_map; 182 struct regmap *conn_map; 183 184 u32 halt_q6; 185 u32 halt_modem; 186 u32 halt_nc; 187 u32 halt_vq6; 188 u32 conn_box; 189 u32 ext_bhs; 190 191 u32 qaccept_mdm; 192 u32 qaccept_cx; 193 u32 qaccept_axi; 194 195 u32 axim1_clk_off; 196 u32 crypto_clk_off; 197 u32 force_clk_on; 198 u32 rscc_disable; 199 200 struct reset_control *mss_restart; 201 struct reset_control *pdc_reset; 202 203 struct qcom_q6v5 q6v5; 204 205 struct clk *active_clks[8]; 206 struct clk *reset_clks[4]; 207 struct clk *proxy_clks[4]; 208 struct device *proxy_pds[3]; 209 int active_clk_count; 210 int reset_clk_count; 211 int proxy_clk_count; 212 int proxy_pd_count; 213 214 struct reg_info active_regs[1]; 215 struct reg_info proxy_regs[1]; 216 struct reg_info fallback_proxy_regs[2]; 217 int active_reg_count; 218 int proxy_reg_count; 219 int fallback_proxy_reg_count; 220 221 bool dump_mba_loaded; 222 size_t current_dump_size; 223 size_t total_dump_size; 
224 225 phys_addr_t mba_phys; 226 size_t mba_size; 227 size_t dp_size; 228 229 phys_addr_t mdata_phys; 230 size_t mdata_size; 231 232 phys_addr_t mpss_phys; 233 phys_addr_t mpss_reloc; 234 size_t mpss_size; 235 236 struct qcom_rproc_glink glink_subdev; 237 struct qcom_rproc_subdev smd_subdev; 238 struct qcom_rproc_pdm pdm_subdev; 239 struct qcom_rproc_ssr ssr_subdev; 240 struct qcom_sysmon *sysmon; 241 struct platform_device *bam_dmux; 242 bool need_mem_protection; 243 bool has_alt_reset; 244 bool has_mba_logs; 245 bool has_spare_reg; 246 bool has_qaccept_regs; 247 bool has_ext_bhs_reg; 248 bool has_ext_cntl_regs; 249 bool has_vq6; 250 u64 mpss_perm; 251 u64 mba_perm; 252 const char *hexagon_mdt_image; 253 int version; 254 }; 255 256 enum { 257 MSS_MSM8226, 258 MSS_MSM8909, 259 MSS_MSM8916, 260 MSS_MSM8926, 261 MSS_MSM8953, 262 MSS_MSM8974, 263 MSS_MSM8996, 264 MSS_MSM8998, 265 MSS_SC7180, 266 MSS_SC7280, 267 MSS_SDM660, 268 MSS_SDM845, 269 }; 270 271 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 272 const struct qcom_mss_reg_res *reg_res) 273 { 274 int i; 275 276 if (!reg_res) 277 return 0; 278 279 for (i = 0; reg_res[i].supply; i++) { 280 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 281 if (IS_ERR(regs[i].reg)) 282 return dev_err_probe(dev, PTR_ERR(regs[i].reg), 283 "Failed to get %s\n regulator", 284 reg_res[i].supply); 285 286 regs[i].uV = reg_res[i].uV; 287 regs[i].uA = reg_res[i].uA; 288 } 289 290 return i; 291 } 292 293 static int q6v5_regulator_enable(struct q6v5 *qproc, 294 struct reg_info *regs, int count) 295 { 296 int ret; 297 int i; 298 299 for (i = 0; i < count; i++) { 300 if (regs[i].uV > 0) { 301 ret = regulator_set_voltage(regs[i].reg, 302 regs[i].uV, INT_MAX); 303 if (ret) { 304 dev_err(qproc->dev, 305 "Failed to request voltage for %d.\n", 306 i); 307 goto err; 308 } 309 } 310 311 if (regs[i].uA > 0) { 312 ret = regulator_set_load(regs[i].reg, 313 regs[i].uA); 314 if (ret < 0) { 315 dev_err(qproc->dev, 316 
"Failed to set regulator mode\n"); 317 goto err; 318 } 319 } 320 321 ret = regulator_enable(regs[i].reg); 322 if (ret) { 323 dev_err(qproc->dev, "Regulator enable failed\n"); 324 goto err; 325 } 326 } 327 328 return 0; 329 err: 330 for (; i >= 0; i--) { 331 if (regs[i].uV > 0) 332 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 333 334 if (regs[i].uA > 0) 335 regulator_set_load(regs[i].reg, 0); 336 337 regulator_disable(regs[i].reg); 338 } 339 340 return ret; 341 } 342 343 static void q6v5_regulator_disable(struct q6v5 *qproc, 344 struct reg_info *regs, int count) 345 { 346 int i; 347 348 for (i = 0; i < count; i++) { 349 if (regs[i].uV > 0) 350 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 351 352 if (regs[i].uA > 0) 353 regulator_set_load(regs[i].reg, 0); 354 355 regulator_disable(regs[i].reg); 356 } 357 } 358 359 static int q6v5_clk_enable(struct device *dev, 360 struct clk **clks, int count) 361 { 362 int rc; 363 int i; 364 365 for (i = 0; i < count; i++) { 366 rc = clk_prepare_enable(clks[i]); 367 if (rc) { 368 dev_err(dev, "Clock enable failed\n"); 369 goto err; 370 } 371 } 372 373 return 0; 374 err: 375 for (i--; i >= 0; i--) 376 clk_disable_unprepare(clks[i]); 377 378 return rc; 379 } 380 381 static void q6v5_clk_disable(struct device *dev, 382 struct clk **clks, int count) 383 { 384 int i; 385 386 for (i = 0; i < count; i++) 387 clk_disable_unprepare(clks[i]); 388 } 389 390 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 391 size_t pd_count) 392 { 393 int ret; 394 int i; 395 396 for (i = 0; i < pd_count; i++) { 397 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 398 ret = pm_runtime_get_sync(pds[i]); 399 if (ret < 0) { 400 pm_runtime_put_noidle(pds[i]); 401 dev_pm_genpd_set_performance_state(pds[i], 0); 402 goto unroll_pd_votes; 403 } 404 } 405 406 return 0; 407 408 unroll_pd_votes: 409 for (i--; i >= 0; i--) { 410 dev_pm_genpd_set_performance_state(pds[i], 0); 411 pm_runtime_put(pds[i]); 412 } 413 414 return ret; 415 } 416 417 
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 418 size_t pd_count) 419 { 420 int i; 421 422 for (i = 0; i < pd_count; i++) { 423 dev_pm_genpd_set_performance_state(pds[i], 0); 424 pm_runtime_put(pds[i]); 425 } 426 } 427 428 static int q6v5_external_bhs_enable(struct q6v5 *qproc) 429 { 430 u32 val; 431 int ret = 0; 432 433 /* 434 * Enable external power block headswitch and wait for it to 435 * stabilize 436 */ 437 regmap_set_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON); 438 439 ret = regmap_read_poll_timeout(qproc->conn_map, qproc->ext_bhs, 440 val, val & EXTERNAL_BHS_STATUS, 441 1, EXTERNAL_BHS_TIMEOUT_US); 442 443 if (ret) { 444 dev_err(qproc->dev, "External BHS timed out\n"); 445 ret = -ETIMEDOUT; 446 } 447 448 return ret; 449 } 450 451 static void q6v5_external_bhs_disable(struct q6v5 *qproc) 452 { 453 regmap_clear_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON); 454 } 455 456 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm, 457 bool local, bool remote, phys_addr_t addr, 458 size_t size) 459 { 460 struct qcom_scm_vmperm next[2]; 461 int perms = 0; 462 463 if (!qproc->need_mem_protection) 464 return 0; 465 466 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 467 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 468 return 0; 469 470 if (local) { 471 next[perms].vmid = QCOM_SCM_VMID_HLOS; 472 next[perms].perm = QCOM_SCM_PERM_RWX; 473 perms++; 474 } 475 476 if (remote) { 477 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 478 next[perms].perm = QCOM_SCM_PERM_RW; 479 perms++; 480 } 481 482 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 483 current_perm, next, perms); 484 } 485 486 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region) 487 { 488 const struct firmware *dp_fw; 489 490 if (request_firmware_direct(&dp_fw, "msadp", qproc->dev)) 491 return; 492 493 if (SZ_1M + dp_fw->size <= qproc->mba_size) { 494 memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size); 495 
qproc->dp_size = dp_fw->size; 496 } 497 498 release_firmware(dp_fw); 499 } 500 501 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 502 { 503 struct q6v5 *qproc = rproc->priv; 504 void *mba_region; 505 506 /* MBA is restricted to a maximum size of 1M */ 507 if (fw->size > qproc->mba_size || fw->size > SZ_1M) { 508 dev_err(qproc->dev, "MBA firmware load failed\n"); 509 return -EINVAL; 510 } 511 512 mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); 513 if (!mba_region) { 514 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 515 &qproc->mba_phys, qproc->mba_size); 516 return -EBUSY; 517 } 518 519 memcpy(mba_region, fw->data, fw->size); 520 q6v5_debug_policy_load(qproc, mba_region); 521 memunmap(mba_region); 522 523 return 0; 524 } 525 526 static int q6v5_reset_assert(struct q6v5 *qproc) 527 { 528 int ret; 529 530 if (qproc->has_alt_reset) { 531 reset_control_assert(qproc->pdc_reset); 532 ret = reset_control_reset(qproc->mss_restart); 533 reset_control_deassert(qproc->pdc_reset); 534 } else if (qproc->has_spare_reg) { 535 /* 536 * When the AXI pipeline is being reset with the Q6 modem partly 537 * operational there is possibility of AXI valid signal to 538 * glitch, leading to spurious transactions and Q6 hangs. A work 539 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 540 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE 541 * is withdrawn post MSS assert followed by a MSS deassert, 542 * while holding the PDC reset. 
543 */ 544 reset_control_assert(qproc->pdc_reset); 545 regmap_update_bits(qproc->conn_map, qproc->conn_box, 546 AXI_GATING_VALID_OVERRIDE, 1); 547 reset_control_assert(qproc->mss_restart); 548 reset_control_deassert(qproc->pdc_reset); 549 regmap_update_bits(qproc->conn_map, qproc->conn_box, 550 AXI_GATING_VALID_OVERRIDE, 0); 551 ret = reset_control_deassert(qproc->mss_restart); 552 } else if (qproc->has_ext_cntl_regs) { 553 regmap_write(qproc->conn_map, qproc->rscc_disable, 0); 554 reset_control_assert(qproc->pdc_reset); 555 reset_control_assert(qproc->mss_restart); 556 reset_control_deassert(qproc->pdc_reset); 557 ret = reset_control_deassert(qproc->mss_restart); 558 } else { 559 ret = reset_control_assert(qproc->mss_restart); 560 } 561 562 return ret; 563 } 564 565 static int q6v5_reset_deassert(struct q6v5 *qproc) 566 { 567 int ret; 568 569 if (qproc->has_alt_reset) { 570 reset_control_assert(qproc->pdc_reset); 571 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 572 ret = reset_control_reset(qproc->mss_restart); 573 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 574 reset_control_deassert(qproc->pdc_reset); 575 } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) { 576 ret = reset_control_reset(qproc->mss_restart); 577 } else { 578 ret = reset_control_deassert(qproc->mss_restart); 579 } 580 581 return ret; 582 } 583 584 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 585 { 586 unsigned long timeout; 587 s32 val; 588 589 timeout = jiffies + msecs_to_jiffies(ms); 590 for (;;) { 591 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 592 if (val) 593 break; 594 595 if (time_after(jiffies, timeout)) 596 return -ETIMEDOUT; 597 598 msleep(1); 599 } 600 601 return val; 602 } 603 604 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 605 { 606 607 unsigned long timeout; 608 s32 val; 609 610 timeout = jiffies + msecs_to_jiffies(ms); 611 for (;;) { 612 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 613 if (val < 0) 614 break; 615 616 
if (!status && val) 617 break; 618 else if (status && val == status) 619 break; 620 621 if (time_after(jiffies, timeout)) 622 return -ETIMEDOUT; 623 624 msleep(1); 625 } 626 627 return val; 628 } 629 630 static void q6v5_dump_mba_logs(struct q6v5 *qproc) 631 { 632 struct rproc *rproc = qproc->rproc; 633 void *data; 634 void *mba_region; 635 636 if (!qproc->has_mba_logs) 637 return; 638 639 if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, 640 qproc->mba_size)) 641 return; 642 643 mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); 644 if (!mba_region) 645 return; 646 647 data = vmalloc(MBA_LOG_SIZE); 648 if (data) { 649 memcpy(data, mba_region, MBA_LOG_SIZE); 650 dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); 651 } 652 memunmap(mba_region); 653 } 654 655 static int q6v5proc_reset(struct q6v5 *qproc) 656 { 657 u32 val; 658 int ret; 659 int i; 660 661 if (qproc->version == MSS_SDM845) { 662 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 663 val |= Q6SS_CBCR_CLKEN; 664 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 665 666 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 667 val, !(val & Q6SS_CBCR_CLKOFF), 1, 668 Q6SS_CBCR_TIMEOUT_US); 669 if (ret) { 670 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 671 return -ETIMEDOUT; 672 } 673 674 /* De-assert QDSP6 stop core */ 675 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 676 /* Trigger boot FSM */ 677 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 678 679 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 680 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 681 if (ret) { 682 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 683 /* Reset the modem so that boot FSM is in reset state */ 684 q6v5_reset_deassert(qproc); 685 return ret; 686 } 687 688 goto pbl_wait; 689 } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) { 690 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 691 val |= Q6SS_CBCR_CLKEN; 692 
writel(val, qproc->reg_base + QDSP6SS_SLEEP); 693 694 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 695 val, !(val & Q6SS_CBCR_CLKOFF), 1, 696 Q6SS_CBCR_TIMEOUT_US); 697 if (ret) { 698 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 699 return -ETIMEDOUT; 700 } 701 702 /* Turn on the XO clock needed for PLL setup */ 703 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 704 val |= Q6SS_CBCR_CLKEN; 705 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 706 707 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 708 val, !(val & Q6SS_CBCR_CLKOFF), 1, 709 Q6SS_CBCR_TIMEOUT_US); 710 if (ret) { 711 dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); 712 return -ETIMEDOUT; 713 } 714 715 /* Configure Q6 core CBCR to auto-enable after reset sequence */ 716 val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); 717 val |= Q6SS_CBCR_CLKEN; 718 writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR); 719 720 /* De-assert the Q6 stop core signal */ 721 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 722 723 /* Wait for 10 us for any staggering logic to settle */ 724 usleep_range(10, 20); 725 726 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ 727 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 728 729 /* Poll the MSS_STATUS for FSM completion */ 730 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 731 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 732 if (ret) { 733 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 734 /* Reset the modem so that boot FSM is in reset state */ 735 q6v5_reset_deassert(qproc); 736 return ret; 737 } 738 goto pbl_wait; 739 } else if (qproc->version == MSS_MSM8909 || 740 qproc->version == MSS_MSM8953 || 741 qproc->version == MSS_MSM8996 || 742 qproc->version == MSS_MSM8998 || 743 qproc->version == MSS_SDM660) { 744 745 if (qproc->version != MSS_MSM8909 && 746 qproc->version != MSS_MSM8953) 747 /* Override the ACC value if required */ 748 writel(QDSP6SS_ACC_OVERRIDE_VAL, 749 qproc->reg_base + 
QDSP6SS_STRAP_ACC); 750 751 /* Assert resets, stop core */ 752 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 753 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 754 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 755 756 /* BHS require xo cbcr to be enabled */ 757 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 758 val |= Q6SS_CBCR_CLKEN; 759 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 760 761 /* Read CLKOFF bit to go low indicating CLK is enabled */ 762 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 763 val, !(val & Q6SS_CBCR_CLKOFF), 1, 764 Q6SS_CBCR_TIMEOUT_US); 765 if (ret) { 766 dev_err(qproc->dev, 767 "xo cbcr enabling timed out (rc:%d)\n", ret); 768 return ret; 769 } 770 /* Enable power block headswitch and wait for it to stabilize */ 771 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 772 val |= QDSP6v56_BHS_ON; 773 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 774 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 775 udelay(1); 776 777 if (qproc->version == MSS_SDM660) { 778 ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS, 779 i, (i & QDSP6v55_BHS_EN_REST_ACK), 780 1, BHS_CHECK_MAX_LOOPS); 781 if (ret == -ETIMEDOUT) { 782 dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n"); 783 return -ETIMEDOUT; 784 } 785 } 786 787 /* Put LDO in bypass mode */ 788 val |= QDSP6v56_LDO_BYP; 789 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 790 791 if (qproc->version != MSS_MSM8909) { 792 int mem_pwr_ctl; 793 794 /* Deassert QDSP6 compiler memory clamp */ 795 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 796 val &= ~QDSP6v56_CLAMP_QMC_MEM; 797 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 798 799 /* Deassert memory peripheral sleep and L2 memory standby */ 800 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; 801 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 802 803 /* Turn on L1, L2, ETB and JU memories 1 at a time */ 804 if (qproc->version == MSS_MSM8953 || 805 qproc->version == 
MSS_MSM8996) { 806 mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; 807 i = 19; 808 } else { 809 /* MSS_MSM8998, MSS_SDM660 */ 810 mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; 811 i = 28; 812 } 813 val = readl(qproc->reg_base + mem_pwr_ctl); 814 for (; i >= 0; i--) { 815 val |= BIT(i); 816 writel(val, qproc->reg_base + mem_pwr_ctl); 817 /* 818 * Read back value to ensure the write is done then 819 * wait for 1us for both memory peripheral and data 820 * array to turn on. 821 */ 822 val |= readl(qproc->reg_base + mem_pwr_ctl); 823 udelay(1); 824 } 825 } else { 826 /* Turn on memories */ 827 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 828 val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N | 829 Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS; 830 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 831 832 /* Turn on L2 banks 1 at a time */ 833 for (i = 0; i <= 7; i++) { 834 val |= BIT(i); 835 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 836 } 837 } 838 839 /* Remove word line clamp */ 840 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 841 val &= ~QDSP6v56_CLAMP_WL; 842 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 843 } else { 844 /* Assert resets, stop core */ 845 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 846 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 847 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 848 849 /* Enable power block headswitch and wait for it to stabilize */ 850 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 851 val |= QDSS_BHS_ON | QDSS_LDO_BYP; 852 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 853 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 854 udelay(1); 855 /* 856 * Turn on memories. L2 banks should be done individually 857 * to minimize inrush current. 
858 */ 859 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 860 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | 861 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; 862 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 863 val |= Q6SS_L2DATA_SLP_NRET_N_2; 864 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 865 val |= Q6SS_L2DATA_SLP_NRET_N_1; 866 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 867 val |= Q6SS_L2DATA_SLP_NRET_N_0; 868 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 869 } 870 /* Remove IO clamp */ 871 val &= ~Q6SS_CLAMP_IO; 872 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 873 874 /* Bring core out of reset */ 875 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 876 val &= ~Q6SS_CORE_ARES; 877 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 878 879 /* Turn on core clock */ 880 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 881 val |= Q6SS_CLK_ENABLE; 882 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 883 884 /* Start core execution */ 885 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 886 val &= ~Q6SS_STOP_CORE; 887 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 888 889 pbl_wait: 890 /* Wait for PBL status */ 891 ret = q6v5_rmb_pbl_wait(qproc, 1000); 892 if (ret == -ETIMEDOUT) { 893 dev_err(qproc->dev, "PBL boot timed out\n"); 894 } else if (ret != RMB_PBL_SUCCESS) { 895 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); 896 ret = -EINVAL; 897 } else { 898 ret = 0; 899 } 900 901 return ret; 902 } 903 904 static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) 905 { 906 unsigned int val; 907 int ret; 908 909 if (!qproc->has_qaccept_regs) 910 return 0; 911 912 if (qproc->has_ext_cntl_regs) { 913 regmap_write(qproc->conn_map, qproc->rscc_disable, 0); 914 regmap_write(qproc->conn_map, qproc->force_clk_on, 1); 915 916 ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val, 917 !val, 1, Q6SS_CBCR_TIMEOUT_US); 918 if (ret) { 919 dev_err(qproc->dev, 
"failed to enable axim1 clock\n"); 920 return -ETIMEDOUT; 921 } 922 } 923 924 regmap_write(map, offset + QACCEPT_REQ_REG, 1); 925 926 /* Wait for accept */ 927 ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5, 928 QACCEPT_TIMEOUT_US); 929 if (ret) { 930 dev_err(qproc->dev, "qchannel enable failed\n"); 931 return -ETIMEDOUT; 932 } 933 934 return 0; 935 } 936 937 static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) 938 { 939 int ret; 940 unsigned int val, retry; 941 unsigned int nretry = 10; 942 bool takedown_complete = false; 943 944 if (!qproc->has_qaccept_regs) 945 return; 946 947 while (!takedown_complete && nretry) { 948 nretry--; 949 950 /* Wait for active transactions to complete */ 951 regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5, 952 QACCEPT_TIMEOUT_US); 953 954 /* Request Q-channel transaction takedown */ 955 regmap_write(map, offset + QACCEPT_REQ_REG, 0); 956 957 /* 958 * If the request is denied, reset the Q-channel takedown request, 959 * wait for active transactions to complete and retry takedown. 
960 */ 961 retry = 10; 962 while (retry) { 963 usleep_range(5, 10); 964 retry--; 965 ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val); 966 if (!ret && val) { 967 regmap_write(map, offset + QACCEPT_REQ_REG, 1); 968 break; 969 } 970 971 ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val); 972 if (!ret && !val) { 973 takedown_complete = true; 974 break; 975 } 976 } 977 978 if (!retry) 979 break; 980 } 981 982 /* Rely on mss_restart to clear out pending transactions on takedown failure */ 983 if (!takedown_complete) 984 dev_err(qproc->dev, "qchannel takedown failed\n"); 985 } 986 987 static void q6v5proc_halt_axi_port(struct q6v5 *qproc, 988 struct regmap *halt_map, 989 u32 offset) 990 { 991 unsigned int val; 992 int ret; 993 994 /* Check if we're already idle */ 995 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 996 if (!ret && val) 997 return; 998 999 /* Assert halt request */ 1000 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); 1001 1002 /* Wait for halt */ 1003 regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, 1004 val, 1000, HALT_ACK_TIMEOUT_US); 1005 1006 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 1007 if (ret || !val) 1008 dev_err(qproc->dev, "port failed halt\n"); 1009 1010 /* Clear halt request (port will remain halted until reset) */ 1011 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); 1012 } 1013 1014 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, 1015 const char *fw_name) 1016 { 1017 unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; 1018 dma_addr_t phys; 1019 void *metadata; 1020 u64 mdata_perm; 1021 int xferop_ret; 1022 size_t size; 1023 void *ptr; 1024 int ret; 1025 1026 metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev); 1027 if (IS_ERR(metadata)) 1028 return PTR_ERR(metadata); 1029 1030 if (qproc->mdata_phys) { 1031 if (size > qproc->mdata_size) { 1032 ret = -EINVAL; 1033 dev_err(qproc->dev, "metadata size outside memory range\n"); 1034 
goto free_metadata; 1035 } 1036 1037 phys = qproc->mdata_phys; 1038 ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC); 1039 if (!ptr) { 1040 ret = -EBUSY; 1041 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1042 &qproc->mdata_phys, size); 1043 goto free_metadata; 1044 } 1045 } else { 1046 ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); 1047 if (!ptr) { 1048 ret = -ENOMEM; 1049 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 1050 goto free_metadata; 1051 } 1052 } 1053 1054 memcpy(ptr, metadata, size); 1055 1056 if (qproc->mdata_phys) 1057 memunmap(ptr); 1058 1059 /* Hypervisor mapping to access metadata by modem */ 1060 mdata_perm = BIT(QCOM_SCM_VMID_HLOS); 1061 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, 1062 phys, size); 1063 if (ret) { 1064 dev_err(qproc->dev, 1065 "assigning Q6 access to metadata failed: %d\n", ret); 1066 ret = -EAGAIN; 1067 goto free_dma_attrs; 1068 } 1069 1070 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); 1071 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1072 1073 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); 1074 if (ret == -ETIMEDOUT) 1075 dev_err(qproc->dev, "MPSS header authentication timed out\n"); 1076 else if (ret < 0) 1077 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 1078 1079 /* Metadata authentication done, remove modem access */ 1080 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, 1081 phys, size); 1082 if (xferop_ret) 1083 dev_warn(qproc->dev, 1084 "mdt buffer not reclaimed system may become unstable\n"); 1085 1086 free_dma_attrs: 1087 if (!qproc->mdata_phys) 1088 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); 1089 free_metadata: 1090 kfree(metadata); 1091 1092 return ret < 0 ? 
ret : 0; 1093 } 1094 1095 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 1096 { 1097 if (phdr->p_type != PT_LOAD) 1098 return false; 1099 1100 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 1101 return false; 1102 1103 if (!phdr->p_memsz) 1104 return false; 1105 1106 return true; 1107 } 1108 1109 static int q6v5_mba_load(struct q6v5 *qproc) 1110 { 1111 int ret; 1112 int xfermemop_ret; 1113 bool mba_load_err = false; 1114 1115 ret = qcom_q6v5_prepare(&qproc->q6v5); 1116 if (ret) 1117 return ret; 1118 1119 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1120 if (ret < 0) { 1121 dev_err(qproc->dev, "failed to enable proxy power domains\n"); 1122 goto disable_irqs; 1123 } 1124 1125 ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs, 1126 qproc->fallback_proxy_reg_count); 1127 if (ret) { 1128 dev_err(qproc->dev, "failed to enable fallback proxy supplies\n"); 1129 goto disable_proxy_pds; 1130 } 1131 1132 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 1133 qproc->proxy_reg_count); 1134 if (ret) { 1135 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 1136 goto disable_fallback_proxy_reg; 1137 } 1138 1139 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 1140 qproc->proxy_clk_count); 1141 if (ret) { 1142 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 1143 goto disable_proxy_reg; 1144 } 1145 1146 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 1147 qproc->active_reg_count); 1148 if (ret) { 1149 dev_err(qproc->dev, "failed to enable supplies\n"); 1150 goto disable_proxy_clk; 1151 } 1152 1153 if (qproc->has_ext_bhs_reg) { 1154 ret = q6v5_external_bhs_enable(qproc); 1155 if (ret < 0) 1156 goto disable_vdd; 1157 } 1158 1159 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 1160 qproc->reset_clk_count); 1161 if (ret) { 1162 dev_err(qproc->dev, "failed to enable reset clocks\n"); 1163 goto disable_ext_bhs; 1164 } 1165 1166 ret = q6v5_reset_deassert(qproc); 1167 if (ret) { 1168 
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret) {
		dev_err(qproc->dev, "failed to enable axi bridge\n");
		goto disable_active_clks;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
	 * the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Record where the MBA log buffer lives so it can be dumped later */
	if (qproc->has_mba_logs)
		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);

	/* Hand the MBA image address to the boot ROM via the RMB registers */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		/* Debug policy blob sits 1MiB into the MBA region */
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Wait up to 5s for the MBA to report its XPU status */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	mba_load_err = true;
reclaim_mba:
	/* Take the MBA region back from Q6 before powering down */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		/* Only dump logs when the region is safely back with Linux */
		q6v5_dump_mba_logs(qproc);
	}

	/* Unwind in the exact reverse order of acquisition above */
disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_ext_bhs:
	if (qproc->has_ext_bhs_reg)
		q6v5_external_bhs_disable(qproc);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * q6v5_mba_reclaim() - halt the Q6 and release all resources taken by
 * q6v5_mba_load()
 *
 * Halts the AXI ports, asserts reset, disables clocks/regulators and takes
 * the MBA memory region back from the modem. Used on normal stop and on the
 * coredump path.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		/* Poll until the CLKOFF bits clear, i.e. the clocks are on */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	if (qproc->has_ext_bhs_reg)
		q6v5_external_bhs_disable(qproc);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	/* Non-zero means the handover already ran; drop the proxy votes here */
	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

/*
 * q6v5_reload_mba() - re-fetch and boot the MBA image, used before taking a
 * coredump when the MBA is no longer running.
 */
static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

/*
 * q6v5_mpss_load() - load and authenticate the modem (MPSS) firmware
 * @qproc: driver context
 *
 * Parses the split mdt/bNN firmware image, feeds the segments to the RMB
 * validator and transfers the MPSS region to the modem once authentication
 * completes.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	u32 code_length;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	/* Need room to rewrite the "xxx.mdt" suffix into "xxx.bNN" below */
	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret =
	      q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the load range and whether the image relocates */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	if (qproc->version == MSS_MSM8953) {
		ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size);
		if (ret) {
			dev_err(qproc->dev,
				"setting up mpss memory failed: %d\n", ret);
			goto release_firmware;
		}
	}

	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after MBA is loaded.
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	mpss_reloc = relocate ?
min_addr : qproc->mpss_phys; 1462 qproc->mpss_reloc = mpss_reloc; 1463 /* Load firmware segments */ 1464 for (i = 0; i < ehdr->e_phnum; i++) { 1465 phdr = &phdrs[i]; 1466 1467 if (!q6v5_phdr_valid(phdr)) 1468 continue; 1469 1470 offset = phdr->p_paddr - mpss_reloc; 1471 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1472 dev_err(qproc->dev, "segment outside memory range\n"); 1473 ret = -EINVAL; 1474 goto release_firmware; 1475 } 1476 1477 if (phdr->p_filesz > phdr->p_memsz) { 1478 dev_err(qproc->dev, 1479 "refusing to load segment %d with p_filesz > p_memsz\n", 1480 i); 1481 ret = -EINVAL; 1482 goto release_firmware; 1483 } 1484 1485 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC); 1486 if (!ptr) { 1487 dev_err(qproc->dev, 1488 "unable to map memory region: %pa+%zx-%x\n", 1489 &qproc->mpss_phys, offset, phdr->p_memsz); 1490 goto release_firmware; 1491 } 1492 1493 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1494 /* Firmware is large enough to be non-split */ 1495 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1496 dev_err(qproc->dev, 1497 "failed to load segment %d from truncated file %s\n", 1498 i, fw_name); 1499 ret = -EINVAL; 1500 memunmap(ptr); 1501 goto release_firmware; 1502 } 1503 1504 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1505 } else if (phdr->p_filesz) { 1506 /* Replace "xxx.xxx" with "xxx.bxx" */ 1507 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1508 ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, 1509 ptr, phdr->p_filesz); 1510 if (ret) { 1511 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1512 memunmap(ptr); 1513 goto release_firmware; 1514 } 1515 1516 if (seg_fw->size != phdr->p_filesz) { 1517 dev_err(qproc->dev, 1518 "failed to load segment %d from truncated file %s\n", 1519 i, fw_name); 1520 ret = -EINVAL; 1521 release_firmware(seg_fw); 1522 memunmap(ptr); 1523 goto release_firmware; 1524 } 1525 1526 release_firmware(seg_fw); 1527 } 1528 1529 if (phdr->p_memsz > 
phdr->p_filesz) { 1530 memset(ptr + phdr->p_filesz, 0, 1531 phdr->p_memsz - phdr->p_filesz); 1532 } 1533 memunmap(ptr); 1534 size += phdr->p_memsz; 1535 1536 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1537 if (!code_length) { 1538 boot_addr = relocate ? qproc->mpss_phys : min_addr; 1539 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1540 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1541 } 1542 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1543 1544 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1545 if (ret < 0) { 1546 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1547 ret); 1548 goto release_firmware; 1549 } 1550 } 1551 1552 /* Transfer ownership of modem ddr region to q6 */ 1553 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1554 qproc->mpss_phys, qproc->mpss_size); 1555 if (ret) { 1556 dev_err(qproc->dev, 1557 "assigning Q6 access to mpss memory failed: %d\n", ret); 1558 ret = -EAGAIN; 1559 goto release_firmware; 1560 } 1561 1562 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1563 if (ret == -ETIMEDOUT) 1564 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1565 else if (ret < 0) 1566 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1567 1568 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); 1569 1570 release_firmware: 1571 release_firmware(fw); 1572 out: 1573 kfree(fw_name); 1574 1575 return ret < 0 ? 
	     ret : 0;	/* positive RMB status values mean success */
}

/*
 * qcom_q6v5_dump_segment() - coredump copy callback for one MPSS segment
 *
 * Reloads the MBA if needed so the MPSS region can be reclaimed for reading,
 * copies @size bytes at @cp_offset into @dest (0xff-fills on any failure),
 * and once the final segment has been copied hands the region back to the
 * modem and powers the MBA down again.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		/* Could not access the region; pad the dump instead of failing */
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

/*
 * q6v5_start() - rproc_ops .start: boot the MBA, load the MPSS image and
 * wait for the modem to signal it is up.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
		 qproc->dp_size ? "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running from MPSS memory; the MBA region can come back */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}

/*
 * q6v5_stop() - rproc_ops .stop: request a graceful shutdown then power
 * everything down. Always reports success so teardown proceeds even if the
 * modem did not acknowledge the stop request in time.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	int ret;

	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * qcom_q6v5_register_dump_segments() - rproc_ops .parse_fw: register one
 * coredump segment per loadable program header of the MPSS image.
 *
 * Note @mba_fw is the MBA firmware passed by the core; the segment layout
 * is taken from the separately requested mdt image instead.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}

/* rproc_ops .panic: notify the modem so it can flush state before a panic dump */
static unsigned long q6v5_panic(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;

	return qcom_q6v5_panic(&qproc->q6v5);
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
	.panic = q6v5_panic,
};

/*
 * qcom_msa_handover() - called once the modem takes over its proxy votes;
 * drop the resources Linux was holding on its behalf during boot.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

/*
 * q6v5_init_mem() - map register regions and parse the syscon phandle
 * properties (halt, qaccept, external BHS/control, spare) from DT.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	int halt_cell_cnt = 3;
	int ret;

	qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* With a vq6 there is a fourth halt register cell */
	if (qproc->has_vq6)
		halt_cell_cnt++;

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", halt_cell_cnt, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_vq6)
		qproc->halt_vq6 = args.args[3];

	if (qproc->has_qaccept_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,qaccept-regs",
						       3, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
			return -EINVAL;
		}

		qproc->qaccept_mdm = args.args[0];
		qproc->qaccept_cx = args.args[1];
		qproc->qaccept_axi = args.args[2];
	}

	if (qproc->has_ext_bhs_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-bhs-reg",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->ext_bhs = args.args[0];
	}

	if (qproc->has_ext_cntl_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->force_clk_on = args.args[0];
		qproc->rscc_disable = args.args[1];

		/* Second phandle entry carries the clock-off status offsets */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
			return -EINVAL;
		}

		qproc->axim1_clk_off = args.args[0];
		qproc->crypto_clk_off = args.args[1];
	}

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/*
 * q6v5_init_clocks() - look up the clocks named in the NULL-terminated
 * @clk_names array.
 *
 * Return: number of clocks obtained, or a negative errno.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i]))
			return dev_err_probe(dev, PTR_ERR(clks[i]),
					     "Failed to get %s clock\n",
					     clk_names[i]);
	}

	return i;
}

/*
 * q6v5_pds_attach() - attach the power domains named in the NULL-terminated
 * @pd_names array.
 *
 * Return: number of domains attached, or a negative errno (any already
 * attached domains are detached on failure).
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	/* Handle single power domain */
	if (num_pds == 1 && dev->pm_domain) {
		devs[0] = dev;
		pm_runtime_enable(dev);
		return 1;
	}

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			ret = PTR_ERR(devs[i]) ?
					      : -ENODATA;	/* NULL attach -> domain not described */
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	/* Detach everything attached so far, newest first */
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

/*
 * q6v5_pds_detach() - undo q6v5_pds_attach(), including the single
 * pm_runtime-managed domain special case.
 */
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	struct device *dev = qproc->dev;
	int i;

	/* Handle single power domain */
	if (pd_count == 1 && dev->pm_domain) {
		pm_runtime_disable(dev);
		return;
	}

	for (i = 0; i < pd_count; i++)
		dev_pm_domain_detach(pds[i], false);
}

/*
 * q6v5_init_reset() - acquire the MSS restart line and, on platforms that
 * need it, the PDC reset line.
 */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
								    "pdc_reset");
		if (IS_ERR(qproc->pdc_reset)) {
			dev_err(qproc->dev, "failed to acquire pdc reset\n");
			return PTR_ERR(qproc->pdc_reset);
		}
	}

	return 0;
}

/*
 * q6v5_alloc_memory_region() - resolve the mba, mpss and (optional)
 * metadata reserved-memory regions from the device tree.
 */
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct reserved_mem *rmem;
	struct device_node *node;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
1970 */ 1971 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1972 if (!child) { 1973 node = of_parse_phandle(qproc->dev->of_node, 1974 "memory-region", 0); 1975 } else { 1976 node = of_parse_phandle(child, "memory-region", 0); 1977 of_node_put(child); 1978 } 1979 1980 if (!node) { 1981 dev_err(qproc->dev, "no mba memory-region specified\n"); 1982 return -EINVAL; 1983 } 1984 1985 rmem = of_reserved_mem_lookup(node); 1986 of_node_put(node); 1987 if (!rmem) { 1988 dev_err(qproc->dev, "unable to resolve mba region\n"); 1989 return -EINVAL; 1990 } 1991 1992 qproc->mba_phys = rmem->base; 1993 qproc->mba_size = rmem->size; 1994 1995 if (!child) { 1996 node = of_parse_phandle(qproc->dev->of_node, 1997 "memory-region", 1); 1998 } else { 1999 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 2000 node = of_parse_phandle(child, "memory-region", 0); 2001 of_node_put(child); 2002 } 2003 2004 if (!node) { 2005 dev_err(qproc->dev, "no mpss memory-region specified\n"); 2006 return -EINVAL; 2007 } 2008 2009 rmem = of_reserved_mem_lookup(node); 2010 of_node_put(node); 2011 if (!rmem) { 2012 dev_err(qproc->dev, "unable to resolve mpss region\n"); 2013 return -EINVAL; 2014 } 2015 2016 qproc->mpss_phys = qproc->mpss_reloc = rmem->base; 2017 qproc->mpss_size = rmem->size; 2018 2019 if (!child) { 2020 node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2); 2021 } else { 2022 child = of_get_child_by_name(qproc->dev->of_node, "metadata"); 2023 node = of_parse_phandle(child, "memory-region", 0); 2024 of_node_put(child); 2025 } 2026 2027 if (!node) 2028 return 0; 2029 2030 rmem = of_reserved_mem_lookup(node); 2031 if (!rmem) { 2032 dev_err(qproc->dev, "unable to resolve metadata region\n"); 2033 return -EINVAL; 2034 } 2035 2036 qproc->mdata_phys = rmem->base; 2037 qproc->mdata_size = rmem->size; 2038 2039 return 0; 2040 } 2041 2042 static int q6v5_probe(struct platform_device *pdev) 2043 { 2044 const struct rproc_hexagon_res *desc; 2045 struct device_node *node; 
2046 struct q6v5 *qproc; 2047 struct rproc *rproc; 2048 const char *mba_image; 2049 int ret; 2050 2051 desc = of_device_get_match_data(&pdev->dev); 2052 if (!desc) 2053 return -EINVAL; 2054 2055 if (desc->need_mem_protection && !qcom_scm_is_available()) 2056 return -EPROBE_DEFER; 2057 2058 mba_image = desc->hexagon_mba_image; 2059 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 2060 0, &mba_image); 2061 if (ret < 0 && ret != -EINVAL) { 2062 dev_err(&pdev->dev, "unable to read mba firmware-name\n"); 2063 return ret; 2064 } 2065 2066 rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 2067 mba_image, sizeof(*qproc)); 2068 if (!rproc) { 2069 dev_err(&pdev->dev, "failed to allocate rproc\n"); 2070 return -ENOMEM; 2071 } 2072 2073 rproc->auto_boot = false; 2074 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 2075 2076 qproc = rproc->priv; 2077 qproc->dev = &pdev->dev; 2078 qproc->rproc = rproc; 2079 qproc->hexagon_mdt_image = "modem.mdt"; 2080 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 2081 1, &qproc->hexagon_mdt_image); 2082 if (ret < 0 && ret != -EINVAL) { 2083 dev_err(&pdev->dev, "unable to read mpss firmware-name\n"); 2084 return ret; 2085 } 2086 2087 platform_set_drvdata(pdev, qproc); 2088 2089 qproc->has_qaccept_regs = desc->has_qaccept_regs; 2090 qproc->has_ext_bhs_reg = desc->has_ext_bhs_reg; 2091 qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs; 2092 qproc->has_vq6 = desc->has_vq6; 2093 qproc->has_spare_reg = desc->has_spare_reg; 2094 ret = q6v5_init_mem(qproc, pdev); 2095 if (ret) 2096 return ret; 2097 2098 ret = q6v5_alloc_memory_region(qproc); 2099 if (ret) 2100 return ret; 2101 2102 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 2103 desc->proxy_clk_names); 2104 if (ret < 0) 2105 return ret; 2106 qproc->proxy_clk_count = ret; 2107 2108 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 2109 desc->reset_clk_names); 2110 if (ret < 0) 2111 return ret; 2112 qproc->reset_clk_count 
= ret; 2113 2114 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 2115 desc->active_clk_names); 2116 if (ret < 0) 2117 return ret; 2118 qproc->active_clk_count = ret; 2119 2120 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 2121 desc->proxy_supply); 2122 if (ret < 0) 2123 return ret; 2124 qproc->proxy_reg_count = ret; 2125 2126 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 2127 desc->active_supply); 2128 if (ret < 0) 2129 return ret; 2130 qproc->active_reg_count = ret; 2131 2132 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 2133 desc->proxy_pd_names); 2134 /* Fallback to regulators for old device trees */ 2135 if (ret == -ENODATA && desc->fallback_proxy_supply) { 2136 ret = q6v5_regulator_init(&pdev->dev, 2137 qproc->fallback_proxy_regs, 2138 desc->fallback_proxy_supply); 2139 if (ret < 0) 2140 return ret; 2141 qproc->fallback_proxy_reg_count = ret; 2142 } else if (ret < 0) { 2143 dev_err(&pdev->dev, "Failed to init power domains\n"); 2144 return ret; 2145 } else { 2146 qproc->proxy_pd_count = ret; 2147 } 2148 2149 qproc->has_alt_reset = desc->has_alt_reset; 2150 ret = q6v5_init_reset(qproc); 2151 if (ret) 2152 goto detach_proxy_pds; 2153 2154 qproc->version = desc->version; 2155 qproc->need_mem_protection = desc->need_mem_protection; 2156 qproc->has_mba_logs = desc->has_mba_logs; 2157 2158 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem", 2159 qcom_msa_handover); 2160 if (ret) 2161 goto detach_proxy_pds; 2162 2163 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 2164 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 2165 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); 2166 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 2167 qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev); 2168 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 2169 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 2170 if (IS_ERR(qproc->sysmon)) { 2171 ret = PTR_ERR(qproc->sysmon); 2172 goto remove_subdevs; 2173 } 
2174 2175 ret = rproc_add(rproc); 2176 if (ret) 2177 goto remove_sysmon_subdev; 2178 2179 node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux"); 2180 qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev); 2181 of_node_put(node); 2182 2183 return 0; 2184 2185 remove_sysmon_subdev: 2186 qcom_remove_sysmon_subdev(qproc->sysmon); 2187 remove_subdevs: 2188 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 2189 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 2190 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 2191 detach_proxy_pds: 2192 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 2193 2194 return ret; 2195 } 2196 2197 static void q6v5_remove(struct platform_device *pdev) 2198 { 2199 struct q6v5 *qproc = platform_get_drvdata(pdev); 2200 struct rproc *rproc = qproc->rproc; 2201 2202 if (qproc->bam_dmux) 2203 of_platform_device_destroy(&qproc->bam_dmux->dev, NULL); 2204 rproc_del(rproc); 2205 2206 qcom_q6v5_deinit(&qproc->q6v5); 2207 qcom_remove_sysmon_subdev(qproc->sysmon); 2208 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 2209 qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev); 2210 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 2211 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 2212 2213 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 2214 } 2215 2216 static const struct rproc_hexagon_res sc7180_mss = { 2217 .hexagon_mba_image = "mba.mbn", 2218 .proxy_clk_names = (char*[]){ 2219 "xo", 2220 NULL 2221 }, 2222 .reset_clk_names = (char*[]){ 2223 "iface", 2224 "bus", 2225 "snoc_axi", 2226 NULL 2227 }, 2228 .active_clk_names = (char*[]){ 2229 "mnoc_axi", 2230 "nav", 2231 NULL 2232 }, 2233 .proxy_pd_names = (char*[]){ 2234 "cx", 2235 "mx", 2236 "mss", 2237 NULL 2238 }, 2239 .need_mem_protection = true, 2240 .has_alt_reset = false, 2241 .has_mba_logs = true, 2242 .has_spare_reg = true, 2243 .has_qaccept_regs = false, 2244 .has_ext_bhs_reg = false, 2245 .has_ext_cntl_regs = false, 2246 
	.has_vq6 = false,
	.version = MSS_SC7180,
};

/* SC7280: per-SoC resource description (clocks, power domains, quirks) */
static const struct rproc_hexagon_res sc7280_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"pka",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"offline",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = false,
	.has_qaccept_regs = true,
	.has_ext_bhs_reg = false,
	.has_ext_cntl_regs = true,
	.has_vq6 = true,
	.version = MSS_SC7280,
};

/* SDM660: per-SoC resource description (clocks, power domains, quirks) */
static const struct rproc_hexagon_res sdm660_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_bhs_reg = false,
	.has_ext_cntl_regs = false,
	.has_vq6 = false,
	.version = MSS_SDM660,
};

/* SDM845: per-SoC resource description (clocks, power domains, quirks) */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.has_qaccept_regs = false,
	.has_ext_bhs_reg = false,
.has_ext_cntl_regs = false, 2343 .has_vq6 = false, 2344 .version = MSS_SDM845, 2345 }; 2346 2347 static const struct rproc_hexagon_res msm8998_mss = { 2348 .hexagon_mba_image = "mba.mbn", 2349 .proxy_clk_names = (char*[]){ 2350 "xo", 2351 "qdss", 2352 "mem", 2353 NULL 2354 }, 2355 .active_clk_names = (char*[]){ 2356 "iface", 2357 "bus", 2358 "gpll0_mss", 2359 "mnoc_axi", 2360 "snoc_axi", 2361 NULL 2362 }, 2363 .proxy_pd_names = (char*[]){ 2364 "cx", 2365 "mx", 2366 NULL 2367 }, 2368 .need_mem_protection = true, 2369 .has_alt_reset = false, 2370 .has_mba_logs = false, 2371 .has_spare_reg = false, 2372 .has_qaccept_regs = false, 2373 .has_ext_bhs_reg = false, 2374 .has_ext_cntl_regs = false, 2375 .has_vq6 = false, 2376 .version = MSS_MSM8998, 2377 }; 2378 2379 static const struct rproc_hexagon_res msm8996_mss = { 2380 .hexagon_mba_image = "mba.mbn", 2381 .proxy_supply = (struct qcom_mss_reg_res[]) { 2382 { 2383 .supply = "pll", 2384 .uA = 100000, 2385 }, 2386 {} 2387 }, 2388 .proxy_clk_names = (char*[]){ 2389 "xo", 2390 "qdss", 2391 NULL 2392 }, 2393 .active_clk_names = (char*[]){ 2394 "iface", 2395 "bus", 2396 "mem", 2397 "gpll0_mss", 2398 "snoc_axi", 2399 "mnoc_axi", 2400 NULL 2401 }, 2402 .proxy_pd_names = (char*[]){ 2403 "mx", 2404 "cx", 2405 NULL 2406 }, 2407 .need_mem_protection = true, 2408 .has_alt_reset = false, 2409 .has_mba_logs = false, 2410 .has_spare_reg = false, 2411 .has_qaccept_regs = false, 2412 .has_ext_bhs_reg = false, 2413 .has_ext_cntl_regs = false, 2414 .has_vq6 = false, 2415 .version = MSS_MSM8996, 2416 }; 2417 2418 static const struct rproc_hexagon_res msm8909_mss = { 2419 .hexagon_mba_image = "mba.mbn", 2420 .proxy_supply = (struct qcom_mss_reg_res[]) { 2421 { 2422 .supply = "pll", 2423 .uA = 100000, 2424 }, 2425 {} 2426 }, 2427 .proxy_clk_names = (char*[]){ 2428 "xo", 2429 NULL 2430 }, 2431 .active_clk_names = (char*[]){ 2432 "iface", 2433 "bus", 2434 "mem", 2435 NULL 2436 }, 2437 .proxy_pd_names = (char*[]){ 2438 "mx", 2439 "cx", 2440 NULL 
2441 }, 2442 .need_mem_protection = false, 2443 .has_alt_reset = false, 2444 .has_mba_logs = false, 2445 .has_spare_reg = false, 2446 .has_qaccept_regs = false, 2447 .has_ext_bhs_reg = false, 2448 .has_ext_cntl_regs = false, 2449 .has_vq6 = false, 2450 .version = MSS_MSM8909, 2451 }; 2452 2453 static const struct rproc_hexagon_res msm8916_mss = { 2454 .hexagon_mba_image = "mba.mbn", 2455 .proxy_supply = (struct qcom_mss_reg_res[]) { 2456 { 2457 .supply = "pll", 2458 .uA = 100000, 2459 }, 2460 {} 2461 }, 2462 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { 2463 { 2464 .supply = "mx", 2465 .uV = 1050000, 2466 }, 2467 { 2468 .supply = "cx", 2469 .uA = 100000, 2470 }, 2471 {} 2472 }, 2473 .proxy_clk_names = (char*[]){ 2474 "xo", 2475 NULL 2476 }, 2477 .active_clk_names = (char*[]){ 2478 "iface", 2479 "bus", 2480 "mem", 2481 NULL 2482 }, 2483 .proxy_pd_names = (char*[]){ 2484 "mx", 2485 "cx", 2486 NULL 2487 }, 2488 .need_mem_protection = false, 2489 .has_alt_reset = false, 2490 .has_mba_logs = false, 2491 .has_spare_reg = false, 2492 .has_qaccept_regs = false, 2493 .has_ext_bhs_reg = false, 2494 .has_ext_cntl_regs = false, 2495 .has_vq6 = false, 2496 .version = MSS_MSM8916, 2497 }; 2498 2499 static const struct rproc_hexagon_res msm8953_mss = { 2500 .hexagon_mba_image = "mba.mbn", 2501 .proxy_supply = (struct qcom_mss_reg_res[]) { 2502 { 2503 .supply = "pll", 2504 .uA = 100000, 2505 }, 2506 {} 2507 }, 2508 .proxy_clk_names = (char*[]){ 2509 "xo", 2510 NULL 2511 }, 2512 .active_clk_names = (char*[]){ 2513 "iface", 2514 "bus", 2515 "mem", 2516 NULL 2517 }, 2518 .proxy_pd_names = (char*[]) { 2519 "cx", 2520 "mx", 2521 "mss", 2522 NULL 2523 }, 2524 .need_mem_protection = false, 2525 .has_alt_reset = false, 2526 .has_mba_logs = false, 2527 .has_spare_reg = false, 2528 .has_qaccept_regs = false, 2529 .has_ext_bhs_reg = false, 2530 .has_ext_cntl_regs = false, 2531 .has_vq6 = false, 2532 .version = MSS_MSM8953, 2533 }; 2534 2535 static const struct rproc_hexagon_res 
msm8974_mss = { 2536 .hexagon_mba_image = "mba.b00", 2537 .proxy_supply = (struct qcom_mss_reg_res[]) { 2538 { 2539 .supply = "pll", 2540 .uA = 100000, 2541 }, 2542 { 2543 .supply = "mx", 2544 .uV = 1050000, 2545 }, 2546 {} 2547 }, 2548 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { 2549 { 2550 .supply = "cx", 2551 .uA = 100000, 2552 }, 2553 {} 2554 }, 2555 .active_supply = (struct qcom_mss_reg_res[]) { 2556 { 2557 .supply = "mss", 2558 .uV = 1050000, 2559 .uA = 100000, 2560 }, 2561 {} 2562 }, 2563 .proxy_clk_names = (char*[]){ 2564 "xo", 2565 NULL 2566 }, 2567 .active_clk_names = (char*[]){ 2568 "iface", 2569 "bus", 2570 "mem", 2571 NULL 2572 }, 2573 .proxy_pd_names = (char*[]){ 2574 "cx", 2575 NULL 2576 }, 2577 .need_mem_protection = false, 2578 .has_alt_reset = false, 2579 .has_mba_logs = false, 2580 .has_spare_reg = false, 2581 .has_qaccept_regs = false, 2582 .has_ext_bhs_reg = false, 2583 .has_ext_cntl_regs = false, 2584 .has_vq6 = false, 2585 .version = MSS_MSM8974, 2586 }; 2587 2588 static const struct rproc_hexagon_res msm8226_mss = { 2589 .hexagon_mba_image = "mba.b00", 2590 .proxy_supply = (struct qcom_mss_reg_res[]) { 2591 { 2592 .supply = "pll", 2593 .uA = 100000, 2594 }, 2595 { 2596 .supply = "mx", 2597 .uV = 1050000, 2598 }, 2599 {} 2600 }, 2601 .proxy_clk_names = (char*[]){ 2602 "xo", 2603 NULL 2604 }, 2605 .active_clk_names = (char*[]){ 2606 "iface", 2607 "bus", 2608 "mem", 2609 NULL 2610 }, 2611 .proxy_pd_names = (char*[]){ 2612 "cx", 2613 NULL 2614 }, 2615 .need_mem_protection = false, 2616 .has_alt_reset = false, 2617 .has_mba_logs = false, 2618 .has_spare_reg = false, 2619 .has_qaccept_regs = false, 2620 .has_ext_bhs_reg = true, 2621 .has_ext_cntl_regs = false, 2622 .has_vq6 = false, 2623 .version = MSS_MSM8226, 2624 }; 2625 2626 static const struct rproc_hexagon_res msm8926_mss = { 2627 .hexagon_mba_image = "mba.b00", 2628 .proxy_supply = (struct qcom_mss_reg_res[]) { 2629 { 2630 .supply = "pll", 2631 .uA = 100000, 2632 }, 2633 { 2634 
.supply = "mx", 2635 .uV = 1050000, 2636 }, 2637 {} 2638 }, 2639 .active_supply = (struct qcom_mss_reg_res[]) { 2640 { 2641 .supply = "mss", 2642 .uV = 1050000, 2643 .uA = 100000, 2644 }, 2645 {} 2646 }, 2647 .proxy_clk_names = (char*[]){ 2648 "xo", 2649 NULL 2650 }, 2651 .active_clk_names = (char*[]){ 2652 "iface", 2653 "bus", 2654 "mem", 2655 NULL 2656 }, 2657 .proxy_pd_names = (char*[]){ 2658 "cx", 2659 NULL 2660 }, 2661 .need_mem_protection = false, 2662 .has_alt_reset = false, 2663 .has_mba_logs = false, 2664 .has_spare_reg = false, 2665 .has_qaccept_regs = false, 2666 .has_ext_bhs_reg = false, 2667 .has_ext_cntl_regs = false, 2668 .has_vq6 = false, 2669 .version = MSS_MSM8926, 2670 }; 2671 2672 static const struct of_device_id q6v5_of_match[] = { 2673 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss}, 2674 { .compatible = "qcom,msm8226-mss-pil", .data = &msm8226_mss}, 2675 { .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss}, 2676 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, 2677 { .compatible = "qcom,msm8926-mss-pil", .data = &msm8926_mss}, 2678 { .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss}, 2679 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, 2680 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, 2681 { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss}, 2682 { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss}, 2683 { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss}, 2684 { .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss}, 2685 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, 2686 { }, 2687 }; 2688 MODULE_DEVICE_TABLE(of, q6v5_of_match); 2689 2690 static struct platform_driver q6v5_driver = { 2691 .probe = q6v5_probe, 2692 .remove = q6v5_remove, 2693 .driver = { 2694 .name = "qcom-q6v5-mss", 2695 .of_match_table = q6v5_of_match, 2696 }, 2697 }; 2698 module_platform_driver(q6v5_driver); 2699 2700 MODULE_DESCRIPTION("Qualcomm 
Self-authenticating modem remoteproc driver"); 2701 MODULE_LICENSE("GPL v2"); 2702