1 /* 2 * ARM debug helpers. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 #include "qemu/osdep.h" 9 #include "qemu/log.h" 10 #include "cpu.h" 11 #include "internals.h" 12 #include "cpu-features.h" 13 #include "cpregs.h" 14 #include "exec/exec-all.h" 15 #include "exec/helper-proto.h" 16 #include "exec/watchpoint.h" 17 #include "system/tcg.h" 18 19 #ifdef CONFIG_TCG 20 /* Return the Exception Level targeted by debug exceptions. */ 21 static int arm_debug_target_el(CPUARMState *env) 22 { 23 bool secure = arm_is_secure(env); 24 bool route_to_el2 = false; 25 26 if (arm_feature(env, ARM_FEATURE_M)) { 27 return 1; 28 } 29 30 if (arm_is_el2_enabled(env)) { 31 route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || 32 env->cp15.mdcr_el2 & MDCR_TDE; 33 } 34 35 if (route_to_el2) { 36 return 2; 37 } else if (arm_feature(env, ARM_FEATURE_EL3) && 38 !arm_el_is_aa64(env, 3) && secure) { 39 return 3; 40 } else { 41 return 1; 42 } 43 } 44 45 /* 46 * Raise an exception to the debug target el. 47 * Modify syndrome to indicate when origin and target EL are the same. 48 */ 49 G_NORETURN static void 50 raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome) 51 { 52 int debug_el = arm_debug_target_el(env); 53 int cur_el = arm_current_el(env); 54 55 /* 56 * If singlestep is targeting a lower EL than the current one, then 57 * DisasContext.ss_active must be false and we can never get here. 58 * Similarly for watchpoint and breakpoint matches. 59 */ 60 assert(debug_el >= cur_el); 61 syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT; 62 raise_exception(env, excp, syndrome, debug_el); 63 } 64 65 /* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ 66 static bool aa64_generate_debug_exceptions(CPUARMState *env) 67 { 68 int cur_el = arm_current_el(env); 69 int debug_el; 70 71 if (cur_el == 3) { 72 return false; 73 } 74 75 /* MDCR_EL3.SDD disables debug events from Secure state */ 76 if (arm_is_secure_below_el3(env) 77 && extract32(env->cp15.mdcr_el3, 16, 1)) { 78 return false; 79 } 80 81 /* 82 * Same EL to same EL debug exceptions need MDSCR_KDE enabled 83 * while not masking the (D)ebug bit in DAIF. 84 */ 85 debug_el = arm_debug_target_el(env); 86 87 if (cur_el == debug_el) { 88 return extract32(env->cp15.mdscr_el1, 13, 1) 89 && !(env->daif & PSTATE_D); 90 } 91 92 /* Otherwise the debug target needs to be a higher EL */ 93 return debug_el > cur_el; 94 } 95 96 static bool aa32_generate_debug_exceptions(CPUARMState *env) 97 { 98 int el = arm_current_el(env); 99 100 if (el == 0 && arm_el_is_aa64(env, 1)) { 101 return aa64_generate_debug_exceptions(env); 102 } 103 104 if (arm_is_secure(env)) { 105 int spd; 106 107 if (el == 0 && (env->cp15.sder & 1)) { 108 /* 109 * SDER.SUIDEN means debug exceptions from Secure EL0 110 * are always enabled. Otherwise they are controlled by 111 * SDCR.SPD like those from other Secure ELs. 112 */ 113 return true; 114 } 115 116 spd = extract32(env->cp15.mdcr_el3, 14, 2); 117 switch (spd) { 118 case 1: 119 /* SPD == 0b01 is reserved, but behaves as 0b00. */ 120 case 0: 121 /* 122 * For 0b00 we return true if external secure invasive debug 123 * is enabled. On real hardware this is controlled by external 124 * signals to the core. QEMU always permits debug, and behaves 125 * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high. 126 */ 127 return true; 128 case 2: 129 return false; 130 case 3: 131 return true; 132 } 133 } 134 135 return el != 2; 136 } 137 138 /* 139 * Return true if debugging exceptions are currently enabled. 140 * This corresponds to what in ARM ARM pseudocode would be 141 * if UsingAArch32() then 142 * return AArch32.GenerateDebugExceptions() 143 * else 144 * return AArch64.GenerateDebugExceptions() 145 * We choose to push the if() down into this function for clarity, 146 * since the pseudocode has it at all callsites except for the one in 147 * CheckSoftwareStep(), where it is elided because both branches would 148 * always return the same value. 149 */ 150 bool arm_generate_debug_exceptions(CPUARMState *env) 151 { 152 if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) { 153 return false; 154 } 155 if (is_a64(env)) { 156 return aa64_generate_debug_exceptions(env); 157 } else { 158 return aa32_generate_debug_exceptions(env); 159 } 160 } 161 162 /* 163 * Is single-stepping active? (Note that the "is EL_D AArch64?" check 164 * implicitly means this always returns false in pre-v8 CPUs.) 165 */ 166 bool arm_singlestep_active(CPUARMState *env) 167 { 168 return extract32(env->cp15.mdscr_el1, 0, 1) 169 && arm_el_is_aa64(env, arm_debug_target_el(env)) 170 && arm_generate_debug_exceptions(env); 171 } 172 173 /* Return true if the linked breakpoint entry lbn passes its checks */ 174 static bool linked_bp_matches(ARMCPU *cpu, int lbn) 175 { 176 CPUARMState *env = &cpu->env; 177 uint64_t bcr = env->cp15.dbgbcr[lbn]; 178 int brps = arm_num_brps(cpu); 179 int ctx_cmps = arm_num_ctx_cmps(cpu); 180 int bt; 181 uint32_t contextidr; 182 uint64_t hcr_el2; 183 184 /* 185 * Links to unimplemented or non-context aware breakpoints are 186 * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or 187 * as if linked to an UNKNOWN context-aware breakpoint (in which 188 * case DBGWCR<n>_EL1.LBN must indicate that breakpoint). 189 * We choose the former. 190 */ 191 if (lbn >= brps || lbn < (brps - ctx_cmps)) { 192 return false; 193 } 194 195 bcr = env->cp15.dbgbcr[lbn]; 196 197 if (extract64(bcr, 0, 1) == 0) { 198 /* Linked breakpoint disabled : generate no events */ 199 return false; 200 } 201 202 bt = extract64(bcr, 20, 4); 203 hcr_el2 = arm_hcr_el2_eff(env); 204 205 switch (bt) { 206 case 3: /* linked context ID match */ 207 switch (arm_current_el(env)) { 208 default: 209 /* Context matches never fire in AArch64 EL3 */ 210 return false; 211 case 2: 212 if (!(hcr_el2 & HCR_E2H)) { 213 /* Context matches never fire in EL2 without E2H enabled. */ 214 return false; 215 } 216 contextidr = env->cp15.contextidr_el[2]; 217 break; 218 case 1: 219 contextidr = env->cp15.contextidr_el[1]; 220 break; 221 case 0: 222 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 223 contextidr = env->cp15.contextidr_el[2]; 224 } else { 225 contextidr = env->cp15.contextidr_el[1]; 226 } 227 break; 228 } 229 break; 230 231 case 7: /* linked contextidr_el1 match */ 232 contextidr = env->cp15.contextidr_el[1]; 233 break; 234 case 13: /* linked contextidr_el2 match */ 235 contextidr = env->cp15.contextidr_el[2]; 236 break; 237 238 case 9: /* linked VMID match (reserved if no EL2) */ 239 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 240 case 15: /* linked full context ID match */ 241 default: 242 /* 243 * Links to Unlinked context breakpoints must generate no 244 * events; we choose to do the same for reserved values too. 245 */ 246 return false; 247 } 248 249 /* 250 * We match the whole register even if this is AArch32 using the 251 * short descriptor format (in which case it holds both PROCID and ASID), 252 * since we don't implement the optional v7 context ID masking. 253 */ 254 return contextidr == (uint32_t)env->cp15.dbgbvr[lbn]; 255 } 256 257 static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) 258 { 259 CPUARMState *env = &cpu->env; 260 uint64_t cr; 261 int pac, hmc, ssc, wt, lbn; 262 /* 263 * Note that for watchpoints the check is against the CPU security 264 * state, not the S/NS attribute on the offending data access. 265 */ 266 bool is_secure = arm_is_secure(env); 267 int access_el = arm_current_el(env); 268 269 if (is_wp) { 270 CPUWatchpoint *wp = env->cpu_watchpoint[n]; 271 272 if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) { 273 return false; 274 } 275 cr = env->cp15.dbgwcr[n]; 276 if (wp->hitattrs.user) { 277 /* 278 * The LDRT/STRT/LDT/STT "unprivileged access" instructions should 279 * match watchpoints as if they were accesses done at EL0, even if 280 * the CPU is at EL1 or higher. 281 */ 282 access_el = 0; 283 } 284 } else { 285 uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; 286 287 if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { 288 return false; 289 } 290 cr = env->cp15.dbgbcr[n]; 291 } 292 /* 293 * The WATCHPOINT_HIT flag guarantees us that the watchpoint is 294 * enabled and that the address and access type match; for breakpoints 295 * we know the address matched; check the remaining fields, including 296 * linked breakpoints. We rely on WCR and BCR having the same layout 297 * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. 298 * Note that some combinations of {PAC, HMC, SSC} are reserved and 299 * must act either like some valid combination or as if the watchpoint 300 * were disabled. We choose the former, and use this together with 301 * the fact that EL3 must always be Secure and EL2 must always be 302 * Non-Secure to simplify the code slightly compared to the full 303 * table in the ARM ARM. 304 */ 305 pac = FIELD_EX64(cr, DBGWCR, PAC); 306 hmc = FIELD_EX64(cr, DBGWCR, HMC); 307 ssc = FIELD_EX64(cr, DBGWCR, SSC); 308 309 switch (ssc) { 310 case 0: 311 break; 312 case 1: 313 case 3: 314 if (is_secure) { 315 return false; 316 } 317 break; 318 case 2: 319 if (!is_secure) { 320 return false; 321 } 322 break; 323 } 324 325 switch (access_el) { 326 case 3: 327 case 2: 328 if (!hmc) { 329 return false; 330 } 331 break; 332 case 1: 333 if (extract32(pac, 0, 1) == 0) { 334 return false; 335 } 336 break; 337 case 0: 338 if (extract32(pac, 1, 1) == 0) { 339 return false; 340 } 341 break; 342 default: 343 g_assert_not_reached(); 344 } 345 346 wt = FIELD_EX64(cr, DBGWCR, WT); 347 lbn = FIELD_EX64(cr, DBGWCR, LBN); 348 349 if (wt && !linked_bp_matches(cpu, lbn)) { 350 return false; 351 } 352 353 return true; 354 } 355 356 static bool check_watchpoints(ARMCPU *cpu) 357 { 358 CPUARMState *env = &cpu->env; 359 int n; 360 361 /* 362 * If watchpoints are disabled globally or we can't take debug 363 * exceptions here then watchpoint firings are ignored. 364 */ 365 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 366 || !arm_generate_debug_exceptions(env)) { 367 return false; 368 } 369 370 for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { 371 if (bp_wp_matches(cpu, n, true)) { 372 return true; 373 } 374 } 375 return false; 376 } 377 378 bool arm_debug_check_breakpoint(CPUState *cs) 379 { 380 ARMCPU *cpu = ARM_CPU(cs); 381 CPUARMState *env = &cpu->env; 382 target_ulong pc; 383 int n; 384 385 /* 386 * If breakpoints are disabled globally or we can't take debug 387 * exceptions here then breakpoint firings are ignored. 388 */ 389 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 390 || !arm_generate_debug_exceptions(env)) { 391 return false; 392 } 393 394 /* 395 * Single-step exceptions have priority over breakpoint exceptions. 396 * If single-step state is active-pending, suppress the bp. 397 */ 398 if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) { 399 return false; 400 } 401 402 /* 403 * PC alignment faults have priority over breakpoint exceptions. 404 */ 405 pc = is_a64(env) ? env->pc : env->regs[15]; 406 if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) { 407 return false; 408 } 409 410 /* 411 * Instruction aborts have priority over breakpoint exceptions. 412 * TODO: We would need to look up the page for PC and verify that 413 * it is present and executable. 414 */ 415 416 for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { 417 if (bp_wp_matches(cpu, n, false)) { 418 return true; 419 } 420 } 421 return false; 422 } 423 424 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) 425 { 426 /* 427 * Called by core code when a CPU watchpoint fires; need to check if this 428 * is also an architectural watchpoint match. 429 */ 430 ARMCPU *cpu = ARM_CPU(cs); 431 432 return check_watchpoints(cpu); 433 } 434 435 /* 436 * Return the FSR value for a debug exception (watchpoint, hardware 437 * breakpoint or BKPT insn) targeting the specified exception level. 438 */ 439 static uint32_t arm_debug_exception_fsr(CPUARMState *env) 440 { 441 ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; 442 int target_el = arm_debug_target_el(env); 443 bool using_lpae; 444 445 if (arm_feature(env, ARM_FEATURE_M)) { 446 using_lpae = false; 447 } else if (target_el == 2 || arm_el_is_aa64(env, target_el)) { 448 using_lpae = true; 449 } else if (arm_feature(env, ARM_FEATURE_PMSA) && 450 arm_feature(env, ARM_FEATURE_V8)) { 451 using_lpae = true; 452 } else if (arm_feature(env, ARM_FEATURE_LPAE) && 453 (env->cp15.tcr_el[target_el] & TTBCR_EAE)) { 454 using_lpae = true; 455 } else { 456 using_lpae = false; 457 } 458 459 if (using_lpae) { 460 return arm_fi_to_lfsc(&fi); 461 } else { 462 return arm_fi_to_sfsc(&fi); 463 } 464 } 465 466 void arm_debug_excp_handler(CPUState *cs) 467 { 468 /* 469 * Called by core code when a watchpoint or breakpoint fires; 470 * need to check which one and raise the appropriate exception. 471 */ 472 ARMCPU *cpu = ARM_CPU(cs); 473 CPUARMState *env = &cpu->env; 474 CPUWatchpoint *wp_hit = cs->watchpoint_hit; 475 476 if (wp_hit) { 477 if (wp_hit->flags & BP_CPU) { 478 bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; 479 480 cs->watchpoint_hit = NULL; 481 482 env->exception.fsr = arm_debug_exception_fsr(env); 483 env->exception.vaddress = wp_hit->hitaddr; 484 raise_exception_debug(env, EXCP_DATA_ABORT, 485 syn_watchpoint(0, 0, wnr)); 486 } 487 } else { 488 uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; 489 490 /* 491 * (1) GDB breakpoints should be handled first. 492 * (2) Do not raise a CPU exception if no CPU breakpoint has fired, 493 * since singlestep is also done by generating a debug internal 494 * exception. 495 */ 496 if (cpu_breakpoint_test(cs, pc, BP_GDB) 497 || !cpu_breakpoint_test(cs, pc, BP_CPU)) { 498 return; 499 } 500 501 env->exception.fsr = arm_debug_exception_fsr(env); 502 /* 503 * FAR is UNKNOWN: clear vaddress to avoid potentially exposing 504 * values to the guest that it shouldn't be able to see at its 505 * exception/security level. 506 */ 507 env->exception.vaddress = 0; 508 raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0)); 509 } 510 } 511 512 /* 513 * Raise an EXCP_BKPT with the specified syndrome register value, 514 * targeting the correct exception level for debug exceptions. 515 */ 516 void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) 517 { 518 int debug_el = arm_debug_target_el(env); 519 int cur_el = arm_current_el(env); 520 521 /* FSR will only be used if the debug target EL is AArch32. */ 522 env->exception.fsr = arm_debug_exception_fsr(env); 523 /* 524 * FAR is UNKNOWN: clear vaddress to avoid potentially exposing 525 * values to the guest that it shouldn't be able to see at its 526 * exception/security level. 527 */ 528 env->exception.vaddress = 0; 529 /* 530 * Other kinds of architectural debug exception are ignored if 531 * they target an exception level below the current one (in QEMU 532 * this is checked by arm_generate_debug_exceptions()). Breakpoint 533 * instructions are special because they always generate an exception 534 * to somewhere: if they can't go to the configured debug exception 535 * level they are taken to the current exception level. 536 */ 537 if (debug_el < cur_el) { 538 debug_el = cur_el; 539 } 540 raise_exception(env, EXCP_BKPT, syndrome, debug_el); 541 } 542 543 void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome) 544 { 545 raise_exception_debug(env, EXCP_UDEF, syndrome); 546 } 547 548 void hw_watchpoint_update(ARMCPU *cpu, int n) 549 { 550 CPUARMState *env = &cpu->env; 551 vaddr len = 0; 552 vaddr wvr = env->cp15.dbgwvr[n]; 553 uint64_t wcr = env->cp15.dbgwcr[n]; 554 int mask; 555 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 556 557 if (env->cpu_watchpoint[n]) { 558 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 559 env->cpu_watchpoint[n] = NULL; 560 } 561 562 if (!FIELD_EX64(wcr, DBGWCR, E)) { 563 /* E bit clear : watchpoint disabled */ 564 return; 565 } 566 567 switch (FIELD_EX64(wcr, DBGWCR, LSC)) { 568 case 0: 569 /* LSC 00 is reserved and must behave as if the wp is disabled */ 570 return; 571 case 1: 572 flags |= BP_MEM_READ; 573 break; 574 case 2: 575 flags |= BP_MEM_WRITE; 576 break; 577 case 3: 578 flags |= BP_MEM_ACCESS; 579 break; 580 } 581 582 /* 583 * Attempts to use both MASK and BAS fields simultaneously are 584 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 585 * thus generating a watchpoint for every byte in the masked region. 586 */ 587 mask = FIELD_EX64(wcr, DBGWCR, MASK); 588 if (mask == 1 || mask == 2) { 589 /* 590 * Reserved values of MASK; we must act as if the mask value was 591 * some non-reserved value, or as if the watchpoint were disabled. 592 * We choose the latter. 593 */ 594 return; 595 } else if (mask) { 596 /* Watchpoint covers an aligned area up to 2GB in size */ 597 len = 1ULL << mask; 598 /* 599 * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 600 * whether the watchpoint fires when the unmasked bits match; we opt 601 * to generate the exceptions. 602 */ 603 wvr &= ~(len - 1); 604 } else { 605 /* Watchpoint covers bytes defined by the byte address select bits */ 606 int bas = FIELD_EX64(wcr, DBGWCR, BAS); 607 int basstart; 608 609 if (extract64(wvr, 2, 1)) { 610 /* 611 * Deprecated case of an only 4-aligned address. BAS[7:4] are 612 * ignored, and BAS[3:0] define which bytes to watch. 613 */ 614 bas &= 0xf; 615 } 616 617 if (bas == 0) { 618 /* This must act as if the watchpoint is disabled */ 619 return; 620 } 621 622 /* 623 * The BAS bits are supposed to be programmed to indicate a contiguous 624 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 625 * we fire for each byte in the word/doubleword addressed by the WVR. 626 * We choose to ignore any non-zero bits after the first range of 1s. 627 */ 628 basstart = ctz32(bas); 629 len = cto32(bas >> basstart); 630 wvr += basstart; 631 } 632 633 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 634 &env->cpu_watchpoint[n]); 635 } 636 637 void hw_watchpoint_update_all(ARMCPU *cpu) 638 { 639 int i; 640 CPUARMState *env = &cpu->env; 641 642 /* 643 * Completely clear out existing QEMU watchpoints and our array, to 644 * avoid possible stale entries following migration load. 645 */ 646 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 647 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 648 649 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 650 hw_watchpoint_update(cpu, i); 651 } 652 } 653 654 void hw_breakpoint_update(ARMCPU *cpu, int n) 655 { 656 CPUARMState *env = &cpu->env; 657 uint64_t bvr = env->cp15.dbgbvr[n]; 658 uint64_t bcr = env->cp15.dbgbcr[n]; 659 vaddr addr; 660 int bt; 661 int flags = BP_CPU; 662 663 if (env->cpu_breakpoint[n]) { 664 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 665 env->cpu_breakpoint[n] = NULL; 666 } 667 668 if (!extract64(bcr, 0, 1)) { 669 /* E bit clear : watchpoint disabled */ 670 return; 671 } 672 673 bt = extract64(bcr, 20, 4); 674 675 switch (bt) { 676 case 4: /* unlinked address mismatch (reserved if AArch64) */ 677 case 5: /* linked address mismatch (reserved if AArch64) */ 678 qemu_log_mask(LOG_UNIMP, 679 "arm: address mismatch breakpoint types not implemented\n"); 680 return; 681 case 0: /* unlinked address match */ 682 case 1: /* linked address match */ 683 { 684 /* 685 * Bits [1:0] are RES0. 686 * 687 * It is IMPLEMENTATION DEFINED whether bits [63:49] 688 * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit 689 * of the VA field ([48] or [52] for FEAT_LVA), or whether the 690 * value is read as written. It is CONSTRAINED UNPREDICTABLE 691 * whether the RESS bits are ignored when comparing an address. 692 * Therefore we are allowed to compare the entire register, which 693 * lets us avoid considering whether FEAT_LVA is actually enabled. 694 * 695 * The BAS field is used to allow setting breakpoints on 16-bit 696 * wide instructions; it is CONSTRAINED UNPREDICTABLE whether 697 * a bp will fire if the addresses covered by the bp and the addresses 698 * covered by the insn overlap but the insn doesn't start at the 699 * start of the bp address range. We choose to require the insn and 700 * the bp to have the same address. The constraints on writing to 701 * BAS enforced in dbgbcr_write mean we have only four cases: 702 * 0b0000 => no breakpoint 703 * 0b0011 => breakpoint on addr 704 * 0b1100 => breakpoint on addr + 2 705 * 0b1111 => breakpoint on addr 706 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 707 */ 708 int bas = extract64(bcr, 5, 4); 709 addr = bvr & ~3ULL; 710 if (bas == 0) { 711 return; 712 } 713 if (bas == 0xc) { 714 addr += 2; 715 } 716 break; 717 } 718 case 2: /* unlinked context ID match */ 719 case 8: /* unlinked VMID match (reserved if no EL2) */ 720 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 721 qemu_log_mask(LOG_UNIMP, 722 "arm: unlinked context breakpoint types not implemented\n"); 723 return; 724 case 9: /* linked VMID match (reserved if no EL2) */ 725 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 726 case 3: /* linked context ID match */ 727 default: 728 /* 729 * We must generate no events for Linked context matches (unless 730 * they are linked to by some other bp/wp, which is handled in 731 * updates for the linking bp/wp). We choose to also generate no events 732 * for reserved values. 733 */ 734 return; 735 } 736 737 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 738 } 739 740 void hw_breakpoint_update_all(ARMCPU *cpu) 741 { 742 int i; 743 CPUARMState *env = &cpu->env; 744 745 /* 746 * Completely clear out existing QEMU breakpoints and our array, to 747 * avoid possible stale entries following migration load. 748 */ 749 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 750 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 751 752 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 753 hw_breakpoint_update(cpu, i); 754 } 755 } 756 757 #if !defined(CONFIG_USER_ONLY) 758 759 vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len) 760 { 761 ARMCPU *cpu = ARM_CPU(cs); 762 CPUARMState *env = &cpu->env; 763 764 /* 765 * In BE32 system mode, target memory is stored byteswapped (on a 766 * little-endian host system), and by the time we reach here (via an 767 * opcode helper) the addresses of subword accesses have been adjusted 768 * to account for that, which means that watchpoints will not match. 769 * Undo the adjustment here. 770 */ 771 if (arm_sctlr_b(env)) { 772 if (len == 1) { 773 addr ^= 3; 774 } else if (len == 2) { 775 addr ^= 2; 776 } 777 } 778 779 return addr; 780 } 781 782 #endif /* !CONFIG_USER_ONLY */ 783 #endif /* CONFIG_TCG */ 784 785 /* 786 * Check for traps to "powerdown debug" registers, which are controlled 787 * by MDCR.TDOSA 788 */ 789 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 790 bool isread) 791 { 792 int el = arm_current_el(env); 793 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 794 bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || 795 (arm_hcr_el2_eff(env) & HCR_TGE); 796 797 if (el < 2 && mdcr_el2_tdosa) { 798 return CP_ACCESS_TRAP_EL2; 799 } 800 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 801 return CP_ACCESS_TRAP_EL3; 802 } 803 return CP_ACCESS_OK; 804 } 805 806 /* 807 * Check for traps to "debug ROM" registers, which are controlled 808 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 809 */ 810 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 811 bool isread) 812 { 813 int el = arm_current_el(env); 814 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 815 bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || 816 (arm_hcr_el2_eff(env) & HCR_TGE); 817 818 if (el < 2 && mdcr_el2_tdra) { 819 return CP_ACCESS_TRAP_EL2; 820 } 821 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 822 return CP_ACCESS_TRAP_EL3; 823 } 824 return CP_ACCESS_OK; 825 } 826 827 /* 828 * Check for traps to general debug registers, which are controlled 829 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 830 */ 831 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 832 bool isread) 833 { 834 int el = arm_current_el(env); 835 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 836 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 837 (arm_hcr_el2_eff(env) & HCR_TGE); 838 839 if (el < 2 && mdcr_el2_tda) { 840 return CP_ACCESS_TRAP_EL2; 841 } 842 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 843 return CP_ACCESS_TRAP_EL3; 844 } 845 return CP_ACCESS_OK; 846 } 847 848 static CPAccessResult access_dbgvcr32(CPUARMState *env, const ARMCPRegInfo *ri, 849 bool isread) 850 { 851 /* MCDR_EL3.TDMA doesn't apply for FEAT_NV traps */ 852 if (arm_current_el(env) == 2 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 853 return CP_ACCESS_TRAP_EL3; 854 } 855 return CP_ACCESS_OK; 856 } 857 858 /* 859 * Check for traps to Debug Comms Channel registers. If FEAT_FGT 860 * is implemented then these are controlled by MDCR_EL2.TDCC for 861 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by 862 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA. 863 * For EL0, they are also controlled by MDSCR_EL1.TDCC. 864 */ 865 static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri, 866 bool isread) 867 { 868 int el = arm_current_el(env); 869 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 870 bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1); 871 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 872 (arm_hcr_el2_eff(env) & HCR_TGE); 873 bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && 874 (mdcr_el2 & MDCR_TDCC); 875 bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) && 876 (env->cp15.mdcr_el3 & MDCR_TDCC); 877 878 if (el < 1 && mdscr_el1_tdcc) { 879 return CP_ACCESS_TRAP_EL1; 880 } 881 if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) { 882 return CP_ACCESS_TRAP_EL2; 883 } 884 if (!arm_is_el3_or_mon(env) && 885 ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) { 886 return CP_ACCESS_TRAP_EL3; 887 } 888 return CP_ACCESS_OK; 889 } 890 891 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 892 uint64_t value) 893 { 894 /* 895 * Writes to OSLAR_EL1 may update the OS lock status, which can be 896 * read via a bit in OSLSR_EL1. 897 */ 898 int oslock; 899 900 if (ri->state == ARM_CP_STATE_AA32) { 901 oslock = (value == 0xC5ACCE55); 902 } else { 903 oslock = value & 1; 904 } 905 906 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 907 } 908 909 static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 910 uint64_t value) 911 { 912 ARMCPU *cpu = env_archcpu(env); 913 /* 914 * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not 915 * implemented this is RAZ/WI. 916 */ 917 if(arm_feature(env, ARM_FEATURE_AARCH64) 918 ? cpu_isar_feature(aa64_doublelock, cpu) 919 : cpu_isar_feature(aa32_doublelock, cpu)) { 920 env->cp15.osdlr_el1 = value & 1; 921 } 922 } 923 924 static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri, 925 uint64_t value) 926 { 927 env->cp15.dbgclaim |= (value & 0xFF); 928 } 929 930 static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri) 931 { 932 /* CLAIM bits are RAO */ 933 return 0xFF; 934 } 935 936 static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 937 uint64_t value) 938 { 939 env->cp15.dbgclaim &= ~(value & 0xFF); 940 } 941 942 static const ARMCPRegInfo debug_cp_reginfo[] = { 943 /* 944 * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 945 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 946 * unlike DBGDRAR it is never accessible from EL0. 947 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 948 * accessor. 949 */ 950 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 951 .access = PL0_R, .accessfn = access_tdra, 952 .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 }, 953 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 954 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 955 .access = PL1_R, .accessfn = access_tdra, 956 .type = ARM_CP_CONST, .resetvalue = 0 }, 957 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 958 .access = PL0_R, .accessfn = access_tdra, 959 .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 }, 960 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 961 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 962 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 963 .access = PL1_RW, .accessfn = access_tda, 964 .fgt = FGT_MDSCR_EL1, 965 .nv2_redirect_offset = 0x158, 966 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 967 .resetvalue = 0 }, 968 /* 969 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external 970 * Debug Communication Channel is not implemented. 971 */ 972 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, 973 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, 974 .access = PL0_R, .accessfn = access_tdcc, 975 .type = ARM_CP_CONST, .resetvalue = 0 }, 976 /* 977 * These registers belong to the Debug Communications Channel, 978 * which is not implemented. However we implement RAZ/WI behaviour 979 * with trapping to prevent spurious SIGILLs if the guest OS does 980 * access them as the support cannot be probed for. 981 */ 982 { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, 983 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2, 984 .access = PL1_RW, .accessfn = access_tdcc, 985 .type = ARM_CP_CONST, .resetvalue = 0 }, 986 { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, 987 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 988 .access = PL1_RW, .accessfn = access_tdcc, 989 .type = ARM_CP_CONST, .resetvalue = 0 }, 990 /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */ 991 { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14, 992 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, 993 .access = PL0_RW, .accessfn = access_tdcc, 994 .type = ARM_CP_CONST, .resetvalue = 0 }, 995 /* 996 * OSECCR_EL1 provides a mechanism for an operating system 997 * to access the contents of EDECCR. EDECCR is not implemented though, 998 * as is the rest of external device mechanism. 999 */ 1000 { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, 1001 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 1002 .access = PL1_RW, .accessfn = access_tda, 1003 .fgt = FGT_OSECCR_EL1, 1004 .type = ARM_CP_CONST, .resetvalue = 0 }, 1005 /* 1006 * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as 1007 * it is unlikely a guest will care. 1008 * We don't implement the configurable EL0 access. 1009 */ 1010 { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, 1011 .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 1012 .type = ARM_CP_ALIAS, 1013 .access = PL1_R, .accessfn = access_tda, 1014 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 1015 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 1016 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 1017 .access = PL1_W, .type = ARM_CP_NO_RAW, 1018 .accessfn = access_tdosa, 1019 .fgt = FGT_OSLAR_EL1, 1020 .writefn = oslar_write }, 1021 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 1022 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 1023 .access = PL1_R, .resetvalue = 10, 1024 .accessfn = access_tdosa, 1025 .fgt = FGT_OSLSR_EL1, 1026 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 1027 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 1028 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 1029 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 1030 .access = PL1_RW, .accessfn = access_tdosa, 1031 .fgt = FGT_OSDLR_EL1, 1032 .writefn = osdlr_write, 1033 .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) }, 1034 /* 1035 * Dummy DBGVCR: Linux wants to clear this on startup, but we don't 1036 * implement vector catch debug events yet. 1037 */ 1038 { .name = "DBGVCR", 1039 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 1040 .access = PL1_RW, .accessfn = access_tda, 1041 .type = ARM_CP_CONST, .resetvalue = 0 }, 1042 /* 1043 * Dummy MDCCINT_EL1, since we don't implement the Debug Communications 1044 * Channel but Linux may try to access this register. The 32-bit 1045 * alias is DBGDCCINT. 1046 */ 1047 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 1048 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 1049 .access = PL1_RW, .accessfn = access_tdcc, 1050 .type = ARM_CP_CONST, .resetvalue = 0 }, 1051 /* 1052 * Dummy DBGCLAIM registers. 1053 * "The architecture does not define any functionality for the CLAIM tag bits.", 1054 * so we only keep the raw bits 1055 */ 1056 { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH, 1057 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6, 1058 .type = ARM_CP_ALIAS, 1059 .access = PL1_RW, .accessfn = access_tda, 1060 .fgt = FGT_DBGCLAIM, 1061 .writefn = dbgclaimset_write, .readfn = dbgclaimset_read }, 1062 { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH, 1063 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6, 1064 .access = PL1_RW, .accessfn = access_tda, 1065 .fgt = FGT_DBGCLAIM, 1066 .writefn = dbgclaimclr_write, .raw_writefn = raw_write, 1067 .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, 1068 }; 1069 1070 /* These are present only when EL1 supports AArch32 */ 1071 static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { 1072 /* 1073 * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 1074 * to save and restore a 32-bit guest's DBGVCR) 1075 */ 1076 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 1077 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 1078 .access = PL2_RW, .accessfn = access_dbgvcr32, 1079 .type = ARM_CP_CONST | ARM_CP_EL3_NO_EL2_KEEP, 1080 .resetvalue = 0 }, 1081 }; 1082 1083 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 1084 /* 64 bit access versions of the (dummy) debug registers */ 1085 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 1086 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB, 1087 .resetvalue = 0 }, 1088 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 1089 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB, 1090 .resetvalue = 0 }, 1091 }; 1092 1093 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1094 uint64_t value) 1095 { 1096 ARMCPU *cpu = env_archcpu(env); 1097 int i = ri->crm; 1098 1099 /* 1100 * Bits [1:0] are RES0. 1101 * 1102 * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) 1103 * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if 1104 * they contain the value written. It is CONSTRAINED UNPREDICTABLE 1105 * whether the RESS bits are ignored when comparing an address. 1106 * 1107 * Therefore we are allowed to compare the entire register, which lets 1108 * us avoid considering whether or not FEAT_LVA is actually enabled. 1109 */ 1110 value &= ~3ULL; 1111 1112 raw_write(env, ri, value); 1113 if (tcg_enabled()) { 1114 hw_watchpoint_update(cpu, i); 1115 } 1116 } 1117 1118 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1119 uint64_t value) 1120 { 1121 ARMCPU *cpu = env_archcpu(env); 1122 int i = ri->crm; 1123 1124 raw_write(env, ri, value); 1125 if (tcg_enabled()) { 1126 hw_watchpoint_update(cpu, i); 1127 } 1128 } 1129 1130 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1131 uint64_t value) 1132 { 1133 ARMCPU *cpu = env_archcpu(env); 1134 int i = ri->crm; 1135 1136 raw_write(env, ri, value); 1137 if (tcg_enabled()) { 1138 hw_breakpoint_update(cpu, i); 1139 } 1140 } 1141 1142 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1143 uint64_t value) 1144 { 1145 ARMCPU *cpu = env_archcpu(env); 1146 int i = ri->crm; 1147 1148 /* 1149 * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 1150 * copy of BAS[0]. 1151 */ 1152 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 1153 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 1154 1155 raw_write(env, ri, value); 1156 if (tcg_enabled()) { 1157 hw_breakpoint_update(cpu, i); 1158 } 1159 } 1160 1161 void define_debug_regs(ARMCPU *cpu) 1162 { 1163 /* 1164 * Define v7 and v8 architectural debug registers. 1165 * These are just dummy implementations for now. 1166 */ 1167 int i; 1168 int wrps, brps, ctx_cmps; 1169 1170 /* 1171 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot 1172 * use AArch32. Given that bit 15 is RES1, if the value is 0 then 1173 * the register must not exist for this cpu. 1174 */ 1175 if (cpu->isar.dbgdidr != 0) { 1176 ARMCPRegInfo dbgdidr = { 1177 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, 1178 .opc1 = 0, .opc2 = 0, 1179 .access = PL0_R, .accessfn = access_tda, 1180 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 1181 }; 1182 define_one_arm_cp_reg(cpu, &dbgdidr); 1183 } 1184 1185 /* 1186 * DBGDEVID is present in the v7 debug architecture if 1187 * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is 1188 * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist 1189 * from v7.1 of the debug architecture. Because no fields have yet 1190 * been defined in DBGDEVID2 (and quite possibly none will ever 1191 * be) we don't define an ARMISARegisters field for it. 1192 * These registers exist only if EL1 can use AArch32, but that 1193 * happens naturally because they are only PL1 accessible anyway. 1194 */ 1195 if (extract32(cpu->isar.dbgdidr, 15, 1)) { 1196 ARMCPRegInfo dbgdevid = { 1197 .name = "DBGDEVID", 1198 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 2, .crn = 7, 1199 .access = PL1_R, .accessfn = access_tda, 1200 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid, 1201 }; 1202 define_one_arm_cp_reg(cpu, &dbgdevid); 1203 } 1204 if (cpu_isar_feature(aa32_debugv7p1, cpu)) { 1205 ARMCPRegInfo dbgdevid12[] = { 1206 { 1207 .name = "DBGDEVID1", 1208 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 1, .crn = 7, 1209 .access = PL1_R, .accessfn = access_tda, 1210 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1, 1211 }, { 1212 .name = "DBGDEVID2", 1213 .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 0, .crn = 7, 1214 .access = PL1_R, .accessfn = access_tda, 1215 .type = ARM_CP_CONST, .resetvalue = 0, 1216 }, 1217 }; 1218 define_arm_cp_regs(cpu, dbgdevid12); 1219 } 1220 1221 brps = arm_num_brps(cpu); 1222 wrps = arm_num_wrps(cpu); 1223 ctx_cmps = arm_num_ctx_cmps(cpu); 1224 1225 assert(ctx_cmps <= brps); 1226 1227 define_arm_cp_regs(cpu, debug_cp_reginfo); 1228 if (cpu_isar_feature(aa64_aa32_el1, cpu)) { 1229 define_arm_cp_regs(cpu, debug_aa32_el1_reginfo); 1230 } 1231 1232 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 1233 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 1234 } 1235 1236 for (i = 0; i < brps; i++) { 1237 char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i); 1238 char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i); 1239 ARMCPRegInfo dbgregs[] = { 1240 { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH, 1241 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 1242 .access = PL1_RW, .accessfn = access_tda, 1243 .fgt = FGT_DBGBVRN_EL1, 1244 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 1245 .writefn = dbgbvr_write, .raw_writefn = raw_write 1246 }, 1247 { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH, 1248 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 1249 .access = PL1_RW, .accessfn = access_tda, 1250 .fgt = FGT_DBGBCRN_EL1, 1251 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 1252 .writefn = dbgbcr_write, .raw_writefn = raw_write 1253 }, 1254 }; 1255 define_arm_cp_regs(cpu, dbgregs); 1256 g_free(dbgbvr_el1_name); 1257 g_free(dbgbcr_el1_name); 1258 } 1259 1260 for (i = 0; i < wrps; i++) { 1261 char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i); 1262 char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i); 1263 ARMCPRegInfo dbgregs[] = { 1264 { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH, 1265 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 1266 .access = PL1_RW, .accessfn = access_tda, 1267 .fgt = FGT_DBGWVRN_EL1, 1268 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 1269 .writefn = dbgwvr_write, .raw_writefn = raw_write 1270 }, 1271 { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH, 1272 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 1273 .access = PL1_RW, .accessfn = access_tda, 1274 .fgt = FGT_DBGWCRN_EL1, 1275 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 1276 .writefn = dbgwcr_write, .raw_writefn = raw_write 1277 }, 1278 }; 1279 define_arm_cp_regs(cpu, dbgregs); 1280 g_free(dbgwvr_el1_name); 1281 g_free(dbgwcr_el1_name); 1282 } 1283 } 1284