// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-dma.h"
#include "ipu6-buttress.h"
#include "ipu6-platform-buttress-regs.h"

/* Offset (in PS system memory space) of the bootloader status word */
#define BOOTLOADER_STATUS_OFFSET	0x15c

/* Value written by the bootloader when it is up and ready */
#define BOOTLOADER_MAGIC_KEY		0xb00710ad

/* Short aliases for the IPC reset-handshake CSR phase bits */
#define ENTRY	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
#define EXIT	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
#define QUERY	BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE

#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX	10

#define BUTTRESS_POWER_TIMEOUT_US		(200 * USEC_PER_MSEC)

#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US	(5 * USEC_PER_SEC)
#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US	(10 * USEC_PER_SEC)
#define BUTTRESS_CSE_FWRESET_TIMEOUT_US		(100 * USEC_PER_MSEC)

#define BUTTRESS_IPC_TX_TIMEOUT_MS		MSEC_PER_SEC
#define BUTTRESS_IPC_RX_TIMEOUT_MS		MSEC_PER_SEC
#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US	(1 * USEC_PER_SEC)
#define BUTTRESS_TSC_SYNC_TIMEOUT_US		(5 * USEC_PER_MSEC)

#define BUTTRESS_IPC_RESET_RETRY		2000
#define BUTTRESS_CSE_IPC_RESET_RETRY		4
#define BUTTRESS_IPC_CMD_SEND_RETRY		1

/* Bail out of the ISR loop if the status register never goes quiet */
#define BUTTRESS_MAX_CONSECUTIVE_IRQS		100

/* Per-subdevice IRQ status bits; index 0 = isys, index 1 = psys */
static const u32 ipu6_adev_irq_mask[2] = {
	BUTTRESS_ISR_IS_IRQ,
	BUTTRESS_ISR_PS_IRQ
};

/*
 * Run the IU <-> CSE IPC reset handshake.
 *
 * Drives the multi-phase reset protocol over the @ipc csr_in/csr_out
 * register pair, polling csr_in roughly every 400-500 us and reacting
 * to the phase bits the peer (CSE) sets, until the peer completes
 * RST_PHASE2.  Gives up after BUTTRESS_IPC_RESET_RETRY polls.
 *
 * The exact ordering of the clear-by-1 writes and peer-bit writes in
 * each case below is the handshake protocol itself; do not reorder.
 *
 * Serialized against IPC command traffic by b->ipc_mutex.
 *
 * Return: 0 on success (or immediately in non-secure mode, where the
 * CSE handshake is not needed), -ETIMEDOUT if the peer never finishes.
 */
int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
			    struct ipu6_buttress_ipc *ipc)
{
	unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	u32 val = 0, csr_in_clr;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode");
		return 0;
	}

	mutex_lock(&b->ipc_mutex);

	/* Clear-by-1 CSR (all bits), corresponding internal states. */
	val = readl(isp->base + ipc->csr_in);
	writel(val, isp->base + ipc->csr_in);

	/* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
	writel(ENTRY, isp->base + ipc->csr_out);
	/*
	 * Clear-by-1 all CSR bits EXCEPT following
	 * bits:
	 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
	 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
	 * C. Possibly custom bits, depending on
	 * their role.
	 */
	csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
		BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
		BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;

	do {
		usleep_range(400, 500);
		val = readl(isp->base + ipc->csr_in);
		/* Dispatch on the exact combination of phase bits */
		switch (val) {
		case ENTRY | EXIT:
		case ENTRY | EXIT | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
			 * 2) Set peer CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 */
			writel(ENTRY | EXIT, isp->base + ipc->csr_in);
			writel(QUERY, isp->base + ipc->csr_out);
			break;
		case ENTRY:
		case ENTRY | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 */
			writel(ENTRY | QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		case EXIT:
		case EXIT | QUERY:
			/*
			 * Clear-by-1 CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * 1) Clear incoming doorbell.
			 * 2) Clear-by-1 all CSR bits EXCEPT following
			 * bits:
			 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * C. Possibly custom bits, depending on
			 * their role.
			 * 3) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 */
			writel(EXIT, isp->base + ipc->csr_in);
			writel(0, isp->base + ipc->db0_in);
			writel(csr_in_clr, isp->base + ipc->csr_in);
			writel(EXIT, isp->base + ipc->csr_out);

			/*
			 * Read csr_in again to make sure if RST_PHASE2 is done.
			 * If csr_in is QUERY, it should be handled again.
			 */
			usleep_range(200, 300);
			val = readl(isp->base + ipc->csr_in);
			if (val & QUERY) {
				dev_dbg(&isp->pdev->dev,
					"RST_PHASE2 retry csr_in = %x\n", val);
				break;
			}
			/* Handshake complete: only success exit of the loop */
			mutex_unlock(&b->ipc_mutex);
			return 0;
		case QUERY:
			/*
			 * 1) Clear-by-1 CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1
			 */
			writel(QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		default:
			dev_dbg_ratelimited(&isp->pdev->dev,
					    "Unexpected CSR 0x%x\n", val);
			break;
		}
	} while (retries--);

	mutex_unlock(&b->ipc_mutex);
	dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");

	return -ETIMEDOUT;
}

/* Deassert the register-valid request towards the CSE (close IPC window) */
static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
					     struct ipu6_buttress_ipc *ipc)
{
	writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);
}

/*
 * Assert the register-valid request towards the CSE and poll (every
 * 200 us, up to BUTTRESS_IPC_VALIDITY_TIMEOUT_US) for its ACK in
 * csr_in.  On timeout the request is deasserted again before
 * returning the readl_poll_timeout() error.
 */
static int
ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
				struct ipu6_buttress_ipc *ipc)
{
	unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
	void __iomem *addr;
	int ret;
	u32 val;

	writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);

	addr = isp->base + ipc->csr_in;
	ret = readl_poll_timeout(addr, val, val & mask, 200,
				 BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
		ipu6_buttress_ipc_validity_close(isp, ipc);
	}

	return ret;
}

/*
 * Fetch an incoming IPC payload (if the caller wants it) and ring the
 * incoming doorbell down (write 0) to acknowledge reception.
 * Called from hard-IRQ context by ipu6_buttress_isr().
 */
static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
				   struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
{
	if (ipc_msg)
		*ipc_msg = readl(isp->base + ipc->data0_in);
	writel(0, isp->base + ipc->db0_in);
}

/*
 * Send a sequence of IPC commands to the CSE.
 *
 * For each message: write the command word and ring the outgoing
 * doorbell, then wait (up to BUTTRESS_IPC_TX_TIMEOUT_MS) for the
 * send-complete signalled from the ISR.  A send timeout is retried
 * BUTTRESS_IPC_CMD_SEND_RETRY times by clearing the doorbell and
 * re-sending the same message (the i-- / continue pair).  Messages
 * with require_resp additionally wait for the response, which is
 * checked against the NACK pattern and the expected response word.
 *
 * Holds b->ipc_mutex around the whole bulk transfer and brackets it
 * with validity open/close.
 *
 * Return: 0 on success, -ETIMEDOUT on send/receive timeout, -EIO on
 * NACK or unexpected response.
 */
static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
				       struct ipu6_ipc_buttress_bulk_msg *msgs,
				       u32 size)
{
	unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
	unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	struct ipu6_buttress_ipc *ipc = &b->cse;
	u32 val;
	int ret;
	int tout;

	mutex_lock(&b->ipc_mutex);

	ret = ipu6_buttress_ipc_validity_open(isp, ipc);
	if (ret) {
		dev_err(&isp->pdev->dev, "IPC validity open failed\n");
		goto out;
	}

	tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
	rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);

	for (i = 0; i < size; i++) {
		/* Re-arm completions before ringing the doorbell */
		reinit_completion(&ipc->send_complete);
		if (msgs[i].require_resp)
			reinit_completion(&ipc->recv_complete);

		dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
			msgs[i].cmd);
		writel(msgs[i].cmd, isp->base + ipc->data0_out);
		val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
		writel(val, isp->base + ipc->db0_out);

		tout = wait_for_completion_timeout(&ipc->send_complete,
						   tx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "send IPC response timeout\n");
			if (!retry--) {
				ret = -ETIMEDOUT;
				goto out;
			}

			/* Try again if CSE is not responding on first try */
			writel(0, isp->base + ipc->db0_out);
			i--;
			continue;
		}

		/* Successful send: restore the retry budget */
		retry = BUTTRESS_IPC_CMD_SEND_RETRY;

		if (!msgs[i].require_resp)
			continue;

		tout = wait_for_completion_timeout(&ipc->recv_complete,
						   rx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		if (ipc->nack_mask &&
		    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
			dev_err(&isp->pdev->dev,
				"IPC NACK for cmd 0x%x\n", msgs[i].cmd);
			ret = -EIO;
			goto out;
		}

		if (ipc->recv_data != msgs[i].expected_resp) {
			dev_err(&isp->pdev->dev,
				"expected resp: 0x%x, IPC response: 0x%x ",
				msgs[i].expected_resp, ipc->recv_data);
			ret = -EIO;
			goto out;
		}
	}

	dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");

out:
	ipu6_buttress_ipc_validity_close(isp, ipc);
	mutex_unlock(&b->ipc_mutex);
	return ret;
}

/* Convenience wrapper: send a single IPC command via the bulk path */
static int
ipu6_buttress_ipc_send(struct ipu6_device *isp,
		       u32 ipc_msg, u32 size, bool require_resp,
		       u32 expected_resp)
{
	struct ipu6_ipc_buttress_bulk_msg msg = {
		.cmd = ipc_msg,
		.cmd_size = size,
		.require_resp = require_resp,
		.expected_resp = expected_resp,
	};

	return ipu6_buttress_ipc_send_bulk(isp, &msg, 1);
}

/*
 * Invoke the aux driver's hard-IRQ hook for one subdevice.
 *
 * Returns IRQ_NONE when the device/driver is absent, the hook's
 * verdict when one exists, and downgrades a default IRQ_WAKE_THREAD
 * to IRQ_NONE when the driver registered no threaded handler (there
 * would be no thread to wake).
 */
static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (!adev || !adev->auxdrv || !adev->auxdrv_data)
		return IRQ_NONE;

	if (adev->auxdrv_data->isr)
		ret = adev->auxdrv_data->isr(adev);

	if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
		ret = IRQ_NONE;

	return ret;
}

/*
 * Buttress hard-IRQ handler.
 *
 * Takes a runtime-PM reference only if the device is already active
 * (IRQ with the device suspended is spurious), then loops clearing and
 * re-reading the status register until it goes quiet or
 * BUTTRESS_MAX_CONSECUTIVE_IRQS iterations pass (IRQ-storm guard).
 * Dispatches subdevice IRQs to the aux drivers, completes CSE IPC
 * send/receive waiters, and logs SAI/memory/UFI error conditions.
 * Subdevice IRQ sources whose handler asked for a thread are masked
 * here and re-enabled by ipu6_buttress_isr_threaded().
 */
irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
{
	struct ipu6_device *isp = isp_ptr;
	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
	struct ipu6_buttress *b = &isp->buttress;
	u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
	irqreturn_t ret = IRQ_NONE;
	u32 disable_irqs = 0;
	u32 irq_status;
	u32 i, count = 0;
	int active;

	active = pm_runtime_get_if_active(&isp->pdev->dev);
	if (!active)
		return IRQ_NONE;

	/* All-ones means the device has dropped off the bus */
	irq_status = readl(isp->base + reg_irq_sts);
	if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) {
		if (active > 0)
			pm_runtime_put_noidle(&isp->pdev->dev);
		return IRQ_NONE;
	}

	do {
		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);

		for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
			/*
			 * NOTE(review): the subdevice ISR is invoked before
			 * checking whether its status bit is set, so its
			 * verdict is discarded (continue) for inactive
			 * sources but the call still happens — confirm this
			 * is intentional and not a swapped statement order.
			 */
			irqreturn_t r = ipu6_buttress_call_isr(adev[i]);

			if (!(irq_status & ipu6_adev_irq_mask[i]))
				continue;

			if (r == IRQ_WAKE_THREAD) {
				ret = IRQ_WAKE_THREAD;
				disable_irqs |= ipu6_adev_irq_mask[i];
			} else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
				ret = IRQ_HANDLED;
			}
		}

		if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
			ret = IRQ_HANDLED;

		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
			ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
			complete(&b->cse.recv_complete);
		}

		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
			complete(&b->cse.send_complete);
		}

		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
		    ipu6_buttress_get_secure_mode(isp))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_SAI_VIOLATION\n");

		if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
				  BUTTRESS_ISR_PS_FATAL_MEM_ERR))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_FATAL_MEM_ERR\n");

		if (irq_status & BUTTRESS_ISR_UFI_ERROR)
			dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");

		/* IRQ-storm guard: give up rather than spin forever */
		if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
			dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
			ret = IRQ_NONE;
			break;
		}

		irq_status = readl(isp->base + reg_irq_sts);
	} while (irq_status);

	/* Mask sources the threaded handler will deal with */
	if (disable_irqs)
		writel(BUTTRESS_IRQS & ~disable_irqs,
		       isp->base + BUTTRESS_REG_ISR_ENABLE);

	if (active > 0)
		pm_runtime_put(&isp->pdev->dev);

	return ret;
}

irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr) 424 { 425 struct ipu6_device *isp = isp_ptr; 426 struct ipu6_bus_device *adev[] = { isp->isys, isp->psys }; 427 const struct ipu6_auxdrv_data *drv_data = NULL; 428 irqreturn_t ret = IRQ_NONE; 429 unsigned int i; 430 431 for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) { 432 drv_data = adev[i]->auxdrv_data; 433 if (!drv_data) 434 continue; 435 436 if (drv_data->wake_isr_thread && 437 drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) 438 ret = IRQ_HANDLED; 439 } 440 441 writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); 442 443 return ret; 444 } 445 446 int ipu6_buttress_power(struct device *dev, 447 const struct ipu6_buttress_ctrl *ctrl, bool on) 448 { 449 struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp; 450 u32 pwr_sts, val; 451 int ret; 452 453 if (!ctrl) 454 return 0; 455 456 mutex_lock(&isp->buttress.power_mutex); 457 458 if (!on) { 459 val = 0; 460 pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; 461 } else { 462 val = BUTTRESS_FREQ_CTL_START | 463 FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK, 464 ctrl->ratio) | 465 FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK, 466 ctrl->qos_floor) | 467 BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; 468 469 pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; 470 } 471 472 writel(val, isp->base + ctrl->freq_ctl); 473 474 ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, 475 val, (val & ctrl->pwr_sts_mask) == pwr_sts, 476 100, BUTTRESS_POWER_TIMEOUT_US); 477 if (ret) 478 dev_err(&isp->pdev->dev, 479 "Change power status timeout with 0x%x\n", val); 480 481 mutex_unlock(&isp->buttress.power_mutex); 482 483 return ret; 484 } 485 486 bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp) 487 { 488 u32 val; 489 490 val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); 491 492 return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; 493 } 494 495 bool ipu6_buttress_auth_done(struct ipu6_device *isp) 496 { 497 u32 val; 498 499 if (!isp->secure_mode) 500 
return true; 501 502 val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); 503 val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); 504 505 return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; 506 } 507 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, "INTEL_IPU6"); 508 509 int ipu6_buttress_reset_authentication(struct ipu6_device *isp) 510 { 511 int ret; 512 u32 val; 513 514 if (!isp->secure_mode) { 515 dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); 516 return 0; 517 } 518 519 writel(BUTTRESS_FW_RESET_CTL_START, isp->base + 520 BUTTRESS_REG_FW_RESET_CTL); 521 522 ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, 523 val & BUTTRESS_FW_RESET_CTL_DONE, 500, 524 BUTTRESS_CSE_FWRESET_TIMEOUT_US); 525 if (ret) { 526 dev_err(&isp->pdev->dev, 527 "Time out while resetting authentication state\n"); 528 return ret; 529 } 530 531 dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); 532 writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); 533 /* leave some time for HW restore */ 534 usleep_range(800, 1000); 535 536 return 0; 537 } 538 539 int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, 540 const struct firmware *fw, struct sg_table *sgt) 541 { 542 bool is_vmalloc = is_vmalloc_addr(fw->data); 543 struct pci_dev *pdev = sys->isp->pdev; 544 struct page **pages; 545 const void *addr; 546 unsigned long n_pages; 547 unsigned int i; 548 int ret; 549 550 if (!is_vmalloc && !virt_addr_valid(fw->data)) 551 return -EDOM; 552 553 n_pages = PFN_UP(fw->size); 554 555 pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); 556 if (!pages) 557 return -ENOMEM; 558 559 addr = fw->data; 560 for (i = 0; i < n_pages; i++) { 561 struct page *p = is_vmalloc ? 
562 vmalloc_to_page(addr) : virt_to_page(addr); 563 564 if (!p) { 565 ret = -ENOMEM; 566 goto out; 567 } 568 pages[i] = p; 569 addr += PAGE_SIZE; 570 } 571 572 ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, 573 GFP_KERNEL); 574 if (ret) { 575 ret = -ENOMEM; 576 goto out; 577 } 578 579 ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); 580 if (ret) { 581 sg_free_table(sgt); 582 goto out; 583 } 584 585 ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0); 586 if (ret) { 587 dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); 588 sg_free_table(sgt); 589 goto out; 590 } 591 592 ipu6_dma_sync_sgtable(sys, sgt); 593 594 out: 595 kfree(pages); 596 597 return ret; 598 } 599 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, "INTEL_IPU6"); 600 601 void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, 602 struct sg_table *sgt) 603 { 604 struct pci_dev *pdev = sys->isp->pdev; 605 606 ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0); 607 dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0); 608 sg_free_table(sgt); 609 } 610 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, "INTEL_IPU6"); 611 612 int ipu6_buttress_authenticate(struct ipu6_device *isp) 613 { 614 struct ipu6_buttress *b = &isp->buttress; 615 struct ipu6_psys_pdata *psys_pdata; 616 u32 data, mask, done, fail; 617 int ret; 618 619 if (!isp->secure_mode) { 620 dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); 621 return 0; 622 } 623 624 psys_pdata = isp->psys->pdata; 625 626 mutex_lock(&b->auth_mutex); 627 628 if (ipu6_buttress_auth_done(isp)) { 629 ret = 0; 630 goto out_unlock; 631 } 632 633 /* 634 * Write address of FIT table to FW_SOURCE register 635 * Let's use fw address. I.e. 
not using FIT table yet 636 */ 637 data = lower_32_bits(isp->psys->pkg_dir_dma_addr); 638 writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); 639 640 data = upper_32_bits(isp->psys->pkg_dir_dma_addr); 641 writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); 642 643 /* 644 * Write boot_load into IU2CSEDATA0 645 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to 646 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as 647 */ 648 dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); 649 650 ret = ipu6_buttress_ipc_send(isp, 651 BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 652 1, true, 653 BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); 654 if (ret) { 655 dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); 656 goto out_unlock; 657 } 658 659 mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; 660 done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; 661 fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; 662 ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, 663 ((data & mask) == done || 664 (data & mask) == fail), 500, 665 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); 666 if (ret) { 667 dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); 668 goto out_unlock; 669 } 670 671 if ((data & mask) == fail) { 672 dev_err(&isp->pdev->dev, "CSE auth failed\n"); 673 ret = -EINVAL; 674 goto out_unlock; 675 } 676 677 ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, 678 data, data == BOOTLOADER_MAGIC_KEY, 500, 679 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); 680 if (ret) { 681 dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", 682 data); 683 goto out_unlock; 684 } 685 686 /* 687 * Write authenticate_run into IU2CSEDATA0 688 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to 689 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as 690 */ 691 dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); 692 ret = ipu6_buttress_ipc_send(isp, 693 BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, 694 1, true, 695 BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); 696 if (ret) { 697 dev_err(&isp->pdev->dev, "CSE 
authenticate_run failed\n"); 698 goto out_unlock; 699 } 700 701 done = BUTTRESS_SECURITY_CTL_AUTH_DONE; 702 ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, 703 ((data & mask) == done || 704 (data & mask) == fail), 500, 705 BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); 706 if (ret) { 707 dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); 708 goto out_unlock; 709 } 710 711 if ((data & mask) == fail) { 712 dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); 713 ret = -EINVAL; 714 goto out_unlock; 715 } 716 717 dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); 718 719 out_unlock: 720 mutex_unlock(&b->auth_mutex); 721 722 return ret; 723 } 724 725 static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp) 726 { 727 u32 val, mask, done; 728 int ret; 729 730 mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; 731 732 writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, 733 isp->base + BUTTRESS_REG_FABRIC_CMD); 734 735 val = readl(isp->base + BUTTRESS_REG_PWR_STATE); 736 val = FIELD_GET(mask, val); 737 if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { 738 dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); 739 return -EINVAL; 740 } 741 742 done = BUTTRESS_PWR_STATE_HH_STATE_DONE; 743 ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, 744 FIELD_GET(mask, val) == done, 500, 745 BUTTRESS_TSC_SYNC_TIMEOUT_US); 746 if (ret) 747 dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); 748 749 return ret; 750 } 751 752 int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp) 753 { 754 unsigned int i; 755 756 for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { 757 u32 val; 758 int ret; 759 760 ret = ipu6_buttress_send_tsc_request(isp); 761 if (ret != -ETIMEDOUT) 762 return ret; 763 764 val = readl(isp->base + BUTTRESS_REG_TSW_CTL); 765 val = val | BUTTRESS_TSW_CTL_SOFT_RESET; 766 writel(val, isp->base + BUTTRESS_REG_TSW_CTL); 767 val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET; 768 writel(val, isp->base + BUTTRESS_REG_TSW_CTL); 769 } 770 771 
dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); 772 773 return -ETIMEDOUT; 774 } 775 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, "INTEL_IPU6"); 776 777 void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val) 778 { 779 u32 tsc_hi_1, tsc_hi_2, tsc_lo; 780 unsigned long flags; 781 782 local_irq_save(flags); 783 tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); 784 tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); 785 tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); 786 if (tsc_hi_1 == tsc_hi_2) { 787 *val = (u64)tsc_hi_1 << 32 | tsc_lo; 788 } else { 789 /* Check if TSC has rolled over */ 790 if (tsc_lo & BIT(31)) 791 *val = (u64)tsc_hi_1 << 32 | tsc_lo; 792 else 793 *val = (u64)tsc_hi_2 << 32 | tsc_lo; 794 } 795 local_irq_restore(flags); 796 } 797 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, "INTEL_IPU6"); 798 799 u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp) 800 { 801 u64 ns = ticks * 10000; 802 803 /* 804 * converting TSC tick count to ns is calculated by: 805 * Example (TSC clock frequency is 19.2MHz): 806 * ns = ticks * 1000 000 000 / 19.2Mhz 807 * = ticks * 1000 000 000 / 19200000Hz 808 * = ticks * 10000 / 192 ns 809 */ 810 return div_u64(ns, isp->buttress.ref_clk); 811 } 812 EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, "INTEL_IPU6"); 813 814 void ipu6_buttress_restore(struct ipu6_device *isp) 815 { 816 struct ipu6_buttress *b = &isp->buttress; 817 818 writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); 819 writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); 820 writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); 821 } 822 823 int ipu6_buttress_init(struct ipu6_device *isp) 824 { 825 int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; 826 struct ipu6_buttress *b = &isp->buttress; 827 u32 val; 828 829 mutex_init(&b->power_mutex); 830 mutex_init(&b->auth_mutex); 831 mutex_init(&b->cons_mutex); 832 mutex_init(&b->ipc_mutex); 833 init_completion(&b->cse.send_complete); 834 
init_completion(&b->cse.recv_complete); 835 836 b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; 837 b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; 838 b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; 839 b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; 840 b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; 841 b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; 842 b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; 843 b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; 844 845 INIT_LIST_HEAD(&b->constraints); 846 847 isp->secure_mode = ipu6_buttress_get_secure_mode(isp); 848 dev_dbg(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n", 849 isp->secure_mode ? "secure" : "non-secure", 850 readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH), 851 readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); 852 853 b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); 854 writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); 855 writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); 856 857 /* get ref_clk frequency by reading the indication in btrs control */ 858 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); 859 val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val); 860 861 switch (val) { 862 case 0x0: 863 b->ref_clk = 240; 864 break; 865 case 0x1: 866 b->ref_clk = 192; 867 break; 868 case 0x2: 869 b->ref_clk = 384; 870 break; 871 default: 872 dev_warn(&isp->pdev->dev, 873 "Unsupported ref clock, use 19.2Mhz by default.\n"); 874 b->ref_clk = 192; 875 break; 876 } 877 878 /* Retry couple of times in case of CSE initialization is delayed */ 879 do { 880 ret = ipu6_buttress_ipc_reset(isp, &b->cse); 881 if (ret) { 882 dev_warn(&isp->pdev->dev, 883 "IPC reset protocol failed, retrying\n"); 884 } else { 885 dev_dbg(&isp->pdev->dev, "IPC reset done\n"); 886 return 0; 887 } 888 } while (ipc_reset_retry--); 889 890 dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); 891 892 mutex_destroy(&b->power_mutex); 893 mutex_destroy(&b->auth_mutex); 894 mutex_destroy(&b->cons_mutex); 895 mutex_destroy(&b->ipc_mutex); 896 
897 return ret; 898 } 899 900 void ipu6_buttress_exit(struct ipu6_device *isp) 901 { 902 struct ipu6_buttress *b = &isp->buttress; 903 904 writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); 905 906 mutex_destroy(&b->power_mutex); 907 mutex_destroy(&b->auth_mutex); 908 mutex_destroy(&b->cons_mutex); 909 mutex_destroy(&b->ipc_mutex); 910 } 911