1 /*
2 * ARM debug helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8 #include "qemu/osdep.h"
9 #include "qemu/log.h"
10 #include "cpu.h"
11 #include "internals.h"
12 #include "cpu-features.h"
13 #include "cpregs.h"
14 #include "exec/watchpoint.h"
15 #include "system/tcg.h"
16
17 #define HELPER_H "tcg/helper.h"
18 #include "exec/helper-proto.h.inc"
19
20 #ifdef CONFIG_TCG
21 /* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
    /* M-profile has no EL2/EL3: debug exceptions always go to EL1. */
    if (arm_feature(env, ARM_FEATURE_M)) {
        return 1;
    }

    /* With EL2 enabled, HCR_EL2.TGE or MDCR_EL2.TDE route debug to EL2. */
    if (arm_is_el2_enabled(env) &&
        ((env->cp15.hcr_el2 & HCR_TGE) || (env->cp15.mdcr_el2 & MDCR_TDE))) {
        return 2;
    }

    /* Secure state with an AArch32 EL3 takes debug exceptions to EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_el_is_aa64(env, 3) && arm_is_secure(env)) {
        return 3;
    }

    return 1;
}
45
46 /*
47 * Raise an exception to the debug target el.
48 * Modify syndrome to indicate when origin and target EL are the same.
49 */
50 G_NORETURN static void
raise_exception_debug(CPUARMState * env,uint32_t excp,uint32_t syndrome)51 raise_exception_debug(CPUARMState *env, uint32_t excp, uint32_t syndrome)
52 {
53 int debug_el = arm_debug_target_el(env);
54 int cur_el = arm_current_el(env);
55
56 /*
57 * If singlestep is targeting a lower EL than the current one, then
58 * DisasContext.ss_active must be false and we can never get here.
59 * Similarly for watchpoint and breakpoint matches.
60 */
61 assert(debug_el >= cur_el);
62 syndrome |= (debug_el == cur_el) << ARM_EL_EC_SHIFT;
63 raise_exception(env, excp, syndrome, debug_el);
64 }
65
66 /* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
aa64_generate_debug_exceptions(CPUARMState * env)67 static bool aa64_generate_debug_exceptions(CPUARMState *env)
68 {
69 int cur_el = arm_current_el(env);
70 int debug_el;
71
72 if (cur_el == 3) {
73 return false;
74 }
75
76 /* MDCR_EL3.SDD disables debug events from Secure state */
77 if (arm_is_secure_below_el3(env)
78 && extract32(env->cp15.mdcr_el3, 16, 1)) {
79 return false;
80 }
81
82 /*
83 * Same EL to same EL debug exceptions need MDSCR_KDE enabled
84 * while not masking the (D)ebug bit in DAIF.
85 */
86 debug_el = arm_debug_target_el(env);
87
88 if (cur_el == debug_el) {
89 return extract32(env->cp15.mdscr_el1, 13, 1)
90 && !(env->daif & PSTATE_D);
91 }
92
93 /* Otherwise the debug target needs to be a higher EL */
94 return debug_el > cur_el;
95 }
96
aa32_generate_debug_exceptions(CPUARMState * env)97 static bool aa32_generate_debug_exceptions(CPUARMState *env)
98 {
99 int el = arm_current_el(env);
100
101 if (el == 0 && arm_el_is_aa64(env, 1)) {
102 return aa64_generate_debug_exceptions(env);
103 }
104
105 if (arm_is_secure(env)) {
106 int spd;
107
108 if (el == 0 && (env->cp15.sder & 1)) {
109 /*
110 * SDER.SUIDEN means debug exceptions from Secure EL0
111 * are always enabled. Otherwise they are controlled by
112 * SDCR.SPD like those from other Secure ELs.
113 */
114 return true;
115 }
116
117 spd = extract32(env->cp15.mdcr_el3, 14, 2);
118 switch (spd) {
119 case 1:
120 /* SPD == 0b01 is reserved, but behaves as 0b00. */
121 case 0:
122 /*
123 * For 0b00 we return true if external secure invasive debug
124 * is enabled. On real hardware this is controlled by external
125 * signals to the core. QEMU always permits debug, and behaves
126 * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
127 */
128 return true;
129 case 2:
130 return false;
131 case 3:
132 return true;
133 }
134 }
135
136 return el != 2;
137 }
138
139 /*
140 * Return true if debugging exceptions are currently enabled.
141 * This corresponds to what in ARM ARM pseudocode would be
142 * if UsingAArch32() then
143 * return AArch32.GenerateDebugExceptions()
144 * else
145 * return AArch64.GenerateDebugExceptions()
146 * We choose to push the if() down into this function for clarity,
147 * since the pseudocode has it at all callsites except for the one in
148 * CheckSoftwareStep(), where it is elided because both branches would
149 * always return the same value.
150 */
arm_generate_debug_exceptions(CPUARMState * env)151 bool arm_generate_debug_exceptions(CPUARMState *env)
152 {
153 if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) {
154 return false;
155 }
156 if (is_a64(env)) {
157 return aa64_generate_debug_exceptions(env);
158 } else {
159 return aa32_generate_debug_exceptions(env);
160 }
161 }
162
163 /*
164 * Is single-stepping active? (Note that the "is EL_D AArch64?" check
165 * implicitly means this always returns false in pre-v8 CPUs.)
166 */
arm_singlestep_active(CPUARMState * env)167 bool arm_singlestep_active(CPUARMState *env)
168 {
169 return extract32(env->cp15.mdscr_el1, 0, 1)
170 && arm_el_is_aa64(env, arm_debug_target_el(env))
171 && arm_generate_debug_exceptions(env);
172 }
173
174 /* Return true if the linked breakpoint entry lbn passes its checks */
linked_bp_matches(ARMCPU * cpu,int lbn)175 static bool linked_bp_matches(ARMCPU *cpu, int lbn)
176 {
177 CPUARMState *env = &cpu->env;
178 uint64_t bcr = env->cp15.dbgbcr[lbn];
179 int brps = arm_num_brps(cpu);
180 int ctx_cmps = arm_num_ctx_cmps(cpu);
181 int bt;
182 uint32_t contextidr;
183 uint64_t hcr_el2;
184
185 /*
186 * Links to unimplemented or non-context aware breakpoints are
187 * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
188 * as if linked to an UNKNOWN context-aware breakpoint (in which
189 * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
190 * We choose the former.
191 */
192 if (lbn >= brps || lbn < (brps - ctx_cmps)) {
193 return false;
194 }
195
196 bcr = env->cp15.dbgbcr[lbn];
197
198 if (extract64(bcr, 0, 1) == 0) {
199 /* Linked breakpoint disabled : generate no events */
200 return false;
201 }
202
203 bt = extract64(bcr, 20, 4);
204 hcr_el2 = arm_hcr_el2_eff(env);
205
206 switch (bt) {
207 case 3: /* linked context ID match */
208 switch (arm_current_el(env)) {
209 default:
210 /* Context matches never fire in AArch64 EL3 */
211 return false;
212 case 2:
213 if (!(hcr_el2 & HCR_E2H)) {
214 /* Context matches never fire in EL2 without E2H enabled. */
215 return false;
216 }
217 contextidr = env->cp15.contextidr_el[2];
218 break;
219 case 1:
220 contextidr = env->cp15.contextidr_el[1];
221 break;
222 case 0:
223 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
224 contextidr = env->cp15.contextidr_el[2];
225 } else {
226 contextidr = env->cp15.contextidr_el[1];
227 }
228 break;
229 }
230 break;
231
232 case 7: /* linked contextidr_el1 match */
233 contextidr = env->cp15.contextidr_el[1];
234 break;
235 case 13: /* linked contextidr_el2 match */
236 contextidr = env->cp15.contextidr_el[2];
237 break;
238
239 case 9: /* linked VMID match (reserved if no EL2) */
240 case 11: /* linked context ID and VMID match (reserved if no EL2) */
241 case 15: /* linked full context ID match */
242 default:
243 /*
244 * Links to Unlinked context breakpoints must generate no
245 * events; we choose to do the same for reserved values too.
246 */
247 return false;
248 }
249
250 /*
251 * We match the whole register even if this is AArch32 using the
252 * short descriptor format (in which case it holds both PROCID and ASID),
253 * since we don't implement the optional v7 context ID masking.
254 */
255 return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
256 }
257
/*
 * Return true if breakpoint/watchpoint slot n matches architecturally:
 * the control register's SSC/HMC/PAC fields permit a debug event at the
 * current EL and security state, and any linked breakpoint also matches.
 * For is_wp == true, n indexes DBGWCR/cpu_watchpoint; otherwise it
 * indexes DBGBCR/cpu_breakpoint.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        /* No event unless QEMU's watchpoint machinery flagged a hit. */
        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /* Breakpoints only match when set on the exact current PC. */
        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = FIELD_EX64(cr, DBGWCR, PAC);
    hmc = FIELD_EX64(cr, DBGWCR, HMC);
    ssc = FIELD_EX64(cr, DBGWCR, SSC);

    /* SSC gates the event by security state. */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* HMC and the two PAC bits gate the event by exception level. */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = FIELD_EX64(cr, DBGWCR, WT);
    lbn = FIELD_EX64(cr, DBGWCR, LBN);

    /* A linked bp/wp additionally requires its linked breakpoint to match. */
    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
356
check_watchpoints(ARMCPU * cpu)357 static bool check_watchpoints(ARMCPU *cpu)
358 {
359 CPUARMState *env = &cpu->env;
360 int n;
361
362 /*
363 * If watchpoints are disabled globally or we can't take debug
364 * exceptions here then watchpoint firings are ignored.
365 */
366 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
367 || !arm_generate_debug_exceptions(env)) {
368 return false;
369 }
370
371 for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
372 if (bp_wp_matches(cpu, n, true)) {
373 return true;
374 }
375 }
376 return false;
377 }
378
arm_debug_check_breakpoint(CPUState * cs)379 bool arm_debug_check_breakpoint(CPUState *cs)
380 {
381 ARMCPU *cpu = ARM_CPU(cs);
382 CPUARMState *env = &cpu->env;
383 vaddr pc;
384 int n;
385
386 /*
387 * If breakpoints are disabled globally or we can't take debug
388 * exceptions here then breakpoint firings are ignored.
389 */
390 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
391 || !arm_generate_debug_exceptions(env)) {
392 return false;
393 }
394
395 /*
396 * Single-step exceptions have priority over breakpoint exceptions.
397 * If single-step state is active-pending, suppress the bp.
398 */
399 if (arm_singlestep_active(env) && !(env->pstate & PSTATE_SS)) {
400 return false;
401 }
402
403 /*
404 * PC alignment faults have priority over breakpoint exceptions.
405 */
406 pc = is_a64(env) ? env->pc : env->regs[15];
407 if ((is_a64(env) || !env->thumb) && (pc & 3) != 0) {
408 return false;
409 }
410
411 /*
412 * Instruction aborts have priority over breakpoint exceptions.
413 * TODO: We would need to look up the page for PC and verify that
414 * it is present and executable.
415 */
416
417 for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
418 if (bp_wp_matches(cpu, n, false)) {
419 return true;
420 }
421 }
422 return false;
423 }
424
arm_debug_check_watchpoint(CPUState * cs,CPUWatchpoint * wp)425 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
426 {
427 /*
428 * Called by core code when a CPU watchpoint fires; need to check if this
429 * is also an architectural watchpoint match.
430 */
431 ARMCPU *cpu = ARM_CPU(cs);
432
433 return check_watchpoints(cpu);
434 }
435
436 /*
437 * Return the FSR value for a debug exception (watchpoint, hardware
438 * breakpoint or BKPT insn) targeting the specified exception level.
439 */
arm_debug_exception_fsr(CPUARMState * env)440 static uint32_t arm_debug_exception_fsr(CPUARMState *env)
441 {
442 ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
443 int target_el = arm_debug_target_el(env);
444 bool using_lpae;
445
446 if (arm_feature(env, ARM_FEATURE_M)) {
447 using_lpae = false;
448 } else if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
449 using_lpae = true;
450 } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
451 arm_feature(env, ARM_FEATURE_V8)) {
452 using_lpae = true;
453 } else if (arm_feature(env, ARM_FEATURE_LPAE) &&
454 (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
455 using_lpae = true;
456 } else {
457 using_lpae = false;
458 }
459
460 if (using_lpae) {
461 return arm_fi_to_lfsc(&fi);
462 } else {
463 return arm_fi_to_sfsc(&fi);
464 }
465 }
466
void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            /* Architectural (CPU) watchpoint: raise a Data Abort. */
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;

            /* Clear the recorded hit before raising the exception. */
            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            /* Does not return. */
            raise_exception_debug(env, EXCP_DATA_ABORT,
                                  syn_watchpoint(0, 0, wnr));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        /* Does not return. */
        raise_exception_debug(env, EXCP_PREFETCH_ABORT, syn_breakpoint(0));
    }
}
512
513 /*
514 * Raise an EXCP_BKPT with the specified syndrome register value,
515 * targeting the correct exception level for debug exceptions.
516 */
HELPER(exception_bkpt_insn)517 void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
518 {
519 int debug_el = arm_debug_target_el(env);
520 int cur_el = arm_current_el(env);
521
522 /* FSR will only be used if the debug target EL is AArch32. */
523 env->exception.fsr = arm_debug_exception_fsr(env);
524 /*
525 * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
526 * values to the guest that it shouldn't be able to see at its
527 * exception/security level.
528 */
529 env->exception.vaddress = 0;
530 /*
531 * Other kinds of architectural debug exception are ignored if
532 * they target an exception level below the current one (in QEMU
533 * this is checked by arm_generate_debug_exceptions()). Breakpoint
534 * instructions are special because they always generate an exception
535 * to somewhere: if they can't go to the configured debug exception
536 * level they are taken to the current exception level.
537 */
538 if (debug_el < cur_el) {
539 debug_el = cur_el;
540 }
541 raise_exception(env, EXCP_BKPT, syndrome, debug_el);
542 }
543
HELPER(exception_swstep)544 void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
545 {
546 raise_exception_debug(env, EXCP_UDEF, syndrome);
547 }
548
/*
 * Bring QEMU's watchpoint state for slot n into line with the
 * architectural DBGWVR<n>/DBGWCR<n> values: any existing QEMU
 * watchpoint for the slot is removed, and a new one is inserted if
 * the slot is enabled with a valid configuration.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    /* Drop any stale QEMU watchpoint for this slot first. */
    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!FIELD_EX64(wcr, DBGWCR, E)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    /* LSC selects which access types (load/store) the watchpoint traps. */
    switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /*
     * Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = FIELD_EX64(wcr, DBGWCR, MASK);
    if (mask == 1 || mask == 2) {
        /*
         * Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /*
         * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = FIELD_EX64(wcr, DBGWCR, BAS);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /*
             * Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /*
         * The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
637
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int i;

    /*
     * Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    /* Re-derive each slot from the architectural register state. */
    for (i = 0; i < ARRAY_SIZE(env->cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
654
/*
 * Bring QEMU's breakpoint state for slot n into line with the
 * architectural DBGBVR<n>/DBGBCR<n> values: any existing QEMU
 * breakpoint for the slot is removed, and a new one is inserted if
 * the slot is enabled with a breakpoint type we implement.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    /* Drop any stale QEMU breakpoint for this slot first. */
    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /*
         * Bits [1:0] are RES0.
         *
         * It is IMPLEMENTATION DEFINED whether bits [63:49]
         * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
         * of the VA field ([48] or [52] for FEAT_LVA), or whether the
         * value is read as written. It is CONSTRAINED UNPREDICTABLE
         * whether the RESS bits are ignored when comparing an address.
         * Therefore we are allowed to compare the entire register, which
         * lets us avoid considering whether FEAT_LVA is actually enabled.
         *
         * The BAS field is used to allow setting breakpoints on 16-bit
         * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = bvr & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /*
         * We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
740
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int i;

    /*
     * Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    /* Re-derive each slot from the architectural register state. */
    for (i = 0; i < ARRAY_SIZE(env->cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
757
758 #if !defined(CONFIG_USER_ONLY)
759
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        switch (len) {
        case 1:
            addr ^= 3;
            break;
        case 2:
            addr ^= 2;
            break;
        default:
            break;
        }
    }

    return addr;
}
782
783 #endif /* !CONFIG_USER_ONLY */
784 #endif /* CONFIG_TCG */
785
786 /*
787 * Check for traps to "powerdown debug" registers, which are controlled
788 * by MDCR.TDOSA
789 */
access_tdosa(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)790 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
791 bool isread)
792 {
793 int el = arm_current_el(env);
794 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
795 bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
796 (arm_hcr_el2_eff(env) & HCR_TGE);
797
798 if (el < 2 && mdcr_el2_tdosa) {
799 return CP_ACCESS_TRAP_EL2;
800 }
801 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
802 return CP_ACCESS_TRAP_EL3;
803 }
804 return CP_ACCESS_OK;
805 }
806
807 /*
808 * Check for traps to "debug ROM" registers, which are controlled
809 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
810 */
access_tdra(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)811 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
812 bool isread)
813 {
814 int el = arm_current_el(env);
815 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
816 bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
817 (arm_hcr_el2_eff(env) & HCR_TGE);
818
819 if (el < 2 && mdcr_el2_tdra) {
820 return CP_ACCESS_TRAP_EL2;
821 }
822 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
823 return CP_ACCESS_TRAP_EL3;
824 }
825 return CP_ACCESS_OK;
826 }
827
828 /*
829 * Check for traps to general debug registers, which are controlled
830 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
831 */
access_tda(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)832 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
833 bool isread)
834 {
835 int el = arm_current_el(env);
836 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
837 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
838 (arm_hcr_el2_eff(env) & HCR_TGE);
839
840 if (el < 2 && mdcr_el2_tda) {
841 return CP_ACCESS_TRAP_EL2;
842 }
843 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
844 return CP_ACCESS_TRAP_EL3;
845 }
846 return CP_ACCESS_OK;
847 }
848
access_dbgvcr32(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)849 static CPAccessResult access_dbgvcr32(CPUARMState *env, const ARMCPRegInfo *ri,
850 bool isread)
851 {
852 /* MCDR_EL3.TDMA doesn't apply for FEAT_NV traps */
853 if (arm_current_el(env) == 2 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
854 return CP_ACCESS_TRAP_EL3;
855 }
856 return CP_ACCESS_OK;
857 }
858
859 /*
860 * Check for traps to Debug Comms Channel registers. If FEAT_FGT
861 * is implemented then these are controlled by MDCR_EL2.TDCC for
862 * EL2 and MDCR_EL3.TDCC for EL3. They are also controlled by
863 * the general debug access trap bits MDCR_EL2.TDA and MDCR_EL3.TDA.
864 * For EL0, they are also controlled by MDSCR_EL1.TDCC.
865 */
access_tdcc(CPUARMState * env,const ARMCPRegInfo * ri,bool isread)866 static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri,
867 bool isread)
868 {
869 int el = arm_current_el(env);
870 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
871 bool mdscr_el1_tdcc = extract32(env->cp15.mdscr_el1, 12, 1);
872 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
873 (arm_hcr_el2_eff(env) & HCR_TGE);
874 bool mdcr_el2_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
875 (mdcr_el2 & MDCR_TDCC);
876 bool mdcr_el3_tdcc = cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
877 (env->cp15.mdcr_el3 & MDCR_TDCC);
878
879 if (el < 1 && mdscr_el1_tdcc) {
880 return CP_ACCESS_TRAP_EL1;
881 }
882 if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) {
883 return CP_ACCESS_TRAP_EL2;
884 }
885 if (!arm_is_el3_or_mon(env) &&
886 ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) {
887 return CP_ACCESS_TRAP_EL3;
888 }
889 return CP_ACCESS_OK;
890 }
891
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /*
     * Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock = (ri->state == ARM_CP_STATE_AA32)
                 ? (value == 0xC5ACCE55)   /* AArch32 requires the key value */
                 : (value & 1);            /* AArch64 uses bit 0 directly */

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
909
static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not
     * implemented this is RAZ/WI.
     */
    if (arm_feature(env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_doublelock, cpu)
        : cpu_isar_feature(aa32_doublelock, cpu)) {
        env->cp15.osdlr_el1 = value & 1;
    }
}
924
static void dbgclaimset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    /* Set any of the eight implemented CLAIM bits; other bits are ignored. */
    env->cp15.dbgclaim |= value & 0xFF;
}
930
dbgclaimset_read(CPUARMState * env,const ARMCPRegInfo * ri)931 static uint64_t dbgclaimset_read(CPUARMState *env, const ARMCPRegInfo *ri)
932 {
933 /* CLAIM bits are RAO */
934 return 0xFF;
935 }
936
static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    /* Writing 1 to an implemented CLAIM bit clears it. */
    env->cp15.dbgclaim &= ~(value & 0xFF);
}
942
943 static const ARMCPRegInfo debug_cp_reginfo[] = {
944 /*
945 * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
946 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
947 * unlike DBGDRAR it is never accessible from EL0.
948 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
949 * accessor.
950 */
951 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
952 .access = PL0_R, .accessfn = access_tdra,
953 .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 },
954 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
955 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
956 .access = PL1_R, .accessfn = access_tdra,
957 .type = ARM_CP_CONST, .resetvalue = 0 },
958 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
959 .access = PL0_R, .accessfn = access_tdra,
960 .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 },
961 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
962 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
963 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
964 .access = PL1_RW, .accessfn = access_tda,
965 .fgt = FGT_MDSCR_EL1,
966 .nv2_redirect_offset = 0x158,
967 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
968 .resetvalue = 0 },
969 /*
970 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
971 * Debug Communication Channel is not implemented.
972 */
973 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
974 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
975 .access = PL0_R, .accessfn = access_tdcc,
976 .type = ARM_CP_CONST, .resetvalue = 0 },
977 /*
978 * These registers belong to the Debug Communications Channel,
979 * which is not implemented. However we implement RAZ/WI behaviour
980 * with trapping to prevent spurious SIGILLs if the guest OS does
981 * access them as the support cannot be probed for.
982 */
983 { .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
984 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
985 .access = PL1_RW, .accessfn = access_tdcc,
986 .type = ARM_CP_CONST, .resetvalue = 0 },
987 { .name = "OSDTRTX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
988 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
989 .access = PL1_RW, .accessfn = access_tdcc,
990 .type = ARM_CP_CONST, .resetvalue = 0 },
991 /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */
992 { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14,
993 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
994 .access = PL0_RW, .accessfn = access_tdcc,
995 .type = ARM_CP_CONST, .resetvalue = 0 },
996 /*
997 * OSECCR_EL1 provides a mechanism for an operating system
998 * to access the contents of EDECCR. EDECCR is not implemented though,
999 * as is the rest of external device mechanism.
1000 */
1001 { .name = "OSECCR_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
1002 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
1003 .access = PL1_RW, .accessfn = access_tda,
1004 .fgt = FGT_OSECCR_EL1,
1005 .type = ARM_CP_CONST, .resetvalue = 0 },
1006 /*
1007 * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
1008 * it is unlikely a guest will care.
1009 * We don't implement the configurable EL0 access.
1010 */
1011 { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
1012 .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
1013 .type = ARM_CP_ALIAS,
1014 .access = PL1_R, .accessfn = access_tda,
1015 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
1016 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
1017 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
1018 .access = PL1_W, .type = ARM_CP_NO_RAW,
1019 .accessfn = access_tdosa,
1020 .fgt = FGT_OSLAR_EL1,
1021 .writefn = oslar_write },
1022 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
1023 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
1024 .access = PL1_R, .resetvalue = 10,
1025 .accessfn = access_tdosa,
1026 .fgt = FGT_OSLSR_EL1,
1027 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
1028 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
1029 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
1030 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
1031 .access = PL1_RW, .accessfn = access_tdosa,
1032 .fgt = FGT_OSDLR_EL1,
1033 .writefn = osdlr_write,
1034 .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) },
1035 /*
1036 * Dummy DBGVCR: Linux wants to clear this on startup, but we don't
1037 * implement vector catch debug events yet.
1038 */
1039 { .name = "DBGVCR",
1040 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
1041 .access = PL1_RW, .accessfn = access_tda,
1042 .type = ARM_CP_CONST, .resetvalue = 0 },
1043 /*
1044 * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
1045 * Channel but Linux may try to access this register. The 32-bit
1046 * alias is DBGDCCINT.
1047 */
1048 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
1049 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
1050 .access = PL1_RW, .accessfn = access_tdcc,
1051 .type = ARM_CP_CONST, .resetvalue = 0 },
1052 /*
1053 * Dummy DBGCLAIM registers.
1054 * "The architecture does not define any functionality for the CLAIM tag bits.",
1055 * so we only keep the raw bits
1056 */
1057 { .name = "DBGCLAIMSET_EL1", .state = ARM_CP_STATE_BOTH,
1058 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
1059 .type = ARM_CP_ALIAS,
1060 .access = PL1_RW, .accessfn = access_tda,
1061 .fgt = FGT_DBGCLAIM,
1062 .writefn = dbgclaimset_write, .readfn = dbgclaimset_read },
1063 { .name = "DBGCLAIMCLR_EL1", .state = ARM_CP_STATE_BOTH,
1064 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 6,
1065 .access = PL1_RW, .accessfn = access_tda,
1066 .fgt = FGT_DBGCLAIM,
1067 .writefn = dbgclaimclr_write, .raw_writefn = raw_write,
1068 .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
1069 };
1070
/* These are present only when EL1 supports AArch32 */
static const ARMCPRegInfo debug_aa32_el1_reginfo[] = {
    /*
     * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR).
     * Modelled as constant-zero (RAZ/WI) since vector catch debug
     * events are not implemented.
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_dbgvcr32,
      .type = ARM_CP_CONST | ARM_CP_EL3_NO_EL2_KEEP,
      .resetvalue = 0 },
};
1083
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /*
     * 64 bit access versions of the (dummy) debug registers.
     * Both are RAZ (ARM_CP_CONST, resetvalue 0) and hidden from the
     * gdbstub (ARM_CP_NO_GDB) since the 32-bit encodings already
     * cover them for debugger purposes.
     */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB,
      .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT | ARM_CP_NO_GDB,
      .resetvalue = 0 },
};
1093
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int n = ri->crm;    /* watchpoint number is encoded in the CRm field */

    /*
     * Bits [1:0] are RES0.
     *
     * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
     * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
     * they contain the value written. It is CONSTRAINED UNPREDICTABLE
     * whether the RESS bits are ignored when comparing an address.
     *
     * Therefore we are allowed to compare the entire register, which lets
     * us avoid considering whether or not FEAT_LVA is actually enabled.
     */
    raw_write(env, ri, value & ~3ULL);

    /* Resynchronise QEMU's view of this watchpoint under TCG. */
    if (tcg_enabled()) {
        hw_watchpoint_update(cpu, n);
    }
}
1118
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int n = ri->crm;    /* watchpoint number is encoded in the CRm field */

    /* Store the control value, then resync the watchpoint under TCG. */
    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_watchpoint_update(cpu, n);
    }
}
1130
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int n = ri->crm;    /* breakpoint number is encoded in the CRm field */

    /* Store the value, then resync the breakpoint under TCG. */
    raw_write(env, ri, value);
    if (tcg_enabled()) {
        hw_breakpoint_update(cpu, n);
    }
}
1142
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int n = ri->crm;    /* breakpoint number is encoded in the CRm field */

    /*
     * In the BAS field, BAS[3] (bit 8) is a read-only copy of BAS[2]
     * (bit 7), and BAS[1] (bit 6) a read-only copy of BAS[0] (bit 5);
     * force the mirrored bits before storing.
     */
    value = deposit64(value, 8, 1, extract64(value, 7, 1));
    value = deposit64(value, 6, 1, extract64(value, 5, 1));

    raw_write(env, ri, value);

    /* Resynchronise QEMU's view of this breakpoint under TCG. */
    if (tcg_enabled()) {
        hw_breakpoint_update(cpu, n);
    }
}
1161
void define_debug_regs(ARMCPU *cpu)
{
    /*
     * Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;

    /*
     * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
     * use AArch32. Given that bit 15 is RES1, if the value is 0 then
     * the register must not exist for this cpu.
     */
    if (cpu->isar.dbgdidr != 0) {
        ARMCPRegInfo dbgdidr = {
            .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
            .opc1 = 0, .opc2 = 0,
            .access = PL0_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
        };
        define_one_arm_cp_reg(cpu, &dbgdidr);
    }

    /*
     * DBGDEVID is present in the v7 debug architecture if
     * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is
     * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist
     * from v7.1 of the debug architecture. Because no fields have yet
     * been defined in DBGDEVID2 (and quite possibly none will ever
     * be) we don't define an ARMISARegisters field for it.
     * These registers exist only if EL1 can use AArch32, but that
     * happens naturally because they are only PL1 accessible anyway.
     */
    if (extract32(cpu->isar.dbgdidr, 15, 1)) {
        ARMCPRegInfo dbgdevid = {
            .name = "DBGDEVID",
            /* Architected encoding: cp14, opc1=0, CRn=c7, CRm=c2, opc2=7 */
            .cp = 14, .opc1 = 0, .crn = 7, .crm = 2, .opc2 = 7,
            .access = PL1_R, .accessfn = access_tda,
            .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid,
        };
        define_one_arm_cp_reg(cpu, &dbgdevid);
    }
    if (cpu_isar_feature(aa32_debugv7p1, cpu)) {
        ARMCPRegInfo dbgdevid12[] = {
            {
                .name = "DBGDEVID1",
                /* Architected encoding: cp14, opc1=0, CRn=c7, CRm=c1, opc2=7 */
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1,
            }, {
                .name = "DBGDEVID2",
                /* Architected encoding: cp14, opc1=0, CRn=c7, CRm=c0, opc2=7; RAZ */
                .cp = 14, .opc1 = 0, .crn = 7, .crm = 0, .opc2 = 7,
                .access = PL1_R, .accessfn = access_tda,
                .type = ARM_CP_CONST, .resetvalue = 0,
            },
        };
        define_arm_cp_regs(cpu, dbgdevid12);
    }

    brps = arm_num_brps(cpu);
    wrps = arm_num_wrps(cpu);
    ctx_cmps = arm_num_ctx_cmps(cpu);

    /* Context-matching comparators are a subset of the breakpoints. */
    assert(ctx_cmps <= brps);

    define_arm_cp_regs(cpu, debug_cp_reginfo);
    if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
        define_arm_cp_regs(cpu, debug_aa32_el1_reginfo);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    /*
     * Define one DBGBVRn_EL1/DBGBCRn_EL1 pair per implemented breakpoint;
     * the breakpoint number is carried in the CRm field of the encoding
     * (the write hooks rely on this to know which slot to update).
     */
    for (i = 0; i < brps; i++) {
        char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
        char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGBCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgbvr_el1_name);
        g_free(dbgbcr_el1_name);
    }

    /* Likewise one DBGWVRn_EL1/DBGWCRn_EL1 pair per implemented watchpoint. */
    for (i = 0; i < wrps; i++) {
        char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
        char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
        ARMCPRegInfo dbgregs[] = {
            { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWVRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fgt = FGT_DBGWCRN_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
        };
        define_arm_cp_regs(cpu, dbgregs);
        g_free(dbgwvr_el1_name);
        g_free(dbgwcr_el1_name);
    }
}
1285