/*
 * ARM gdb server stub: AArch64 specific functions.
 *
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "gdbstub/helpers.h"
#include "gdbstub/commands.h"
#include "tcg/mte_helper.h"
#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX)
#include <sys/prctl.h>
#include "mte_user_helper.h"
#endif
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/target_page.h"
#endif

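/*
 * Core register access for the gdbstub: register numbers 0..30 are
 * x0..x30, 31 is SP, 32 is PC and 33 is the CPSR/PSTATE view, matching
 * GDB's AArch64 core register layout.  Getters return the number of
 * bytes appended to the buffer; 0 means "unknown register".
 */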
int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (n < 31) {
        /* Core integer register. */
        return gdb_get_reg64(mem_buf, env->xregs[n]);
    }
    switch (n) {
    case 31:
        return gdb_get_reg64(mem_buf, env->xregs[31]);
    case 32:
        return gdb_get_reg64(mem_buf, env->pc);
    case 33:
        return gdb_get_reg32(mem_buf, pstate_read(env));
    }
    /* Unknown register. */
    return 0;
}

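/*
 * Core register write: the value arrives from GDB in target byte order.
 * Returns the number of bytes consumed, or 0 for an unknown register.
 */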
int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp;

    tmp = ldq_p(mem_buf);

    if (n < 31) {
        /* Core integer register. */
        env->xregs[n] = tmp;
        return 8;
    }
    switch (n) {
    case 31:
        env->xregs[31] = tmp;
        return 8;
    case 32:
        env->pc = tmp;
        return 8;
    case 33:
        /* CPSR */
        pstate_write(env, tmp);
        return 4;
    }
    /* Unknown register. */
    return 0;
}

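/*
 * FP/SIMD registers: 0..31 are the 128-bit V registers (stored as two
 * little-endian 64-bit halves), 32 is FPSR and 33 is FPCR.
 */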
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

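/*
 * SVE register map used by the getter/setter below and by the dynamic
 * XML feature built in arm_gen_dynamic_svereg_feature():
 *   0..31   z0..z31   (sve_max_vq * 128 bits each)
 *   32, 33  fpsr, fpcr
 *   34..49  p0..p15   (sve_max_vq * 16 bits each)
 *   50      ffr
 *   51      vg        (vector length in 64-bit granules, read-only)
 */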
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report the vector length in Vector Granules (VG), i.e.
         * 64-bit chunks of a Z reg, while the ZCR works in Vector Quads
         * (VQ), i.e. 128-bit chunks.
         */
        int vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something outside our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d\n",
                      __func__, reg);
        break;
    }

    return 0;
}

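/*
 * SVE register write: the incoming buffer uses the same layout as the
 * getter above.  The vg pseudo-register (51) is read-only here.
 */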
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something outside our range */
        break;
    }

    return 0;
}

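/*
 * Pointer-authentication mask pseudo-registers.  GDB uses these masks
 * to strip the PAC bits from code and data pointers, e.g. while
 * unwinding the stack.
 */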
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (reg) {
    case 0: /* pauth_dmask */
    case 1: /* pauth_cmask */
    case 2: /* pauth_dmask_high */
    case 3: /* pauth_cmask_high */
        /*
         * Note that older versions of this feature only contained
         * pauth_{d,c}mask, for use with Linux user processes, and
         * thus exclusively in the low half of the address space.
         *
         * To support system mode, and to debug kernels, two new regs
         * were added to cover the high half of the address space.
         * For the purpose of pauth_ptr_mask, we can use any well-formed
         * address within the address space half -- here, 0 and -1.
         */
        {
            bool is_data = !(reg & 1);
            bool is_high = reg & 2;
            ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
            ARMVAParameters param;

            param = aa64_va_parameters(env, -is_high, mmu_idx, is_data, false);
            return gdb_get_reg64(buf, pauth_ptr_mask(param));
        }
    default:
        return 0;
    }
}

int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg)
{
    /* All pseudo registers are read-only. */
    return 0;
}

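/*
 * Emit the target-description XML types used by the vector registers.
 * For a prefix of "svev" and a reg_width of, say, 256 bits this produces
 * markup along these lines (illustrative, abridged):
 *
 *   <vector id="svevqu" type="uint128" count="2"/>
 *   ...
 *   <vector id="svevbs" type="int8" count="32"/>
 *   <union id="svevnb">
 *     <field name="u" type="svevbu"/>
 *     <field name="s" type="svevbs"/>
 *   </union>
 *   ...
 *   <union id="svev">
 *     <field name="q" type="svevnq"/>
 *     ...
 *     <field name="b" type="svevnb"/>
 *   </union>
 *
 * so GDB can display each vector register at any lane width/signedness.
 */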
static void output_vector_union_type(GDBFeatureBuilder *builder, int reg_width,
                                     const char *name)
{
    struct TypeSize {
        const char *gdb_type;
        short size;
        char sz, suffix;
    };

    static const struct TypeSize vec_lanes[] = {
        /* quads */
        { "uint128", 128, 'q', 'u' },
        { "int128", 128, 'q', 's' },
        /* 64 bit */
        { "ieee_double", 64, 'd', 'f' },
        { "uint64", 64, 'd', 'u' },
        { "int64", 64, 'd', 's' },
        /* 32 bit */
        { "ieee_single", 32, 's', 'f' },
        { "uint32", 32, 's', 'u' },
        { "int32", 32, 's', 's' },
        /* 16 bit */
        { "ieee_half", 16, 'h', 'f' },
        { "uint16", 16, 'h', 'u' },
        { "int16", 16, 'h', 's' },
        /* bytes */
        { "uint8", 8, 'b', 'u' },
        { "int8", 8, 'b', 's' },
    };

    static const char suf[] = { 'b', 'h', 's', 'd', 'q' };
    int i, j;

    /* First define types and totals in a whole VL */
    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
        gdb_feature_builder_append_tag(
            builder, "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
            name, vec_lanes[i].sz, vec_lanes[i].suffix,
            vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
    }

    /*
     * Now define a union for each size group containing unsigned and
     * signed and potentially float versions of each size from 128 to
     * 8 bits.
     */
    for (i = 0; i < ARRAY_SIZE(suf); i++) {
        int bits = 8 << i;

        gdb_feature_builder_append_tag(builder, "<union id=\"%sn%c\">",
                                       name, suf[i]);
        for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
            if (vec_lanes[j].size == bits) {
                gdb_feature_builder_append_tag(
                    builder, "<field name=\"%c\" type=\"%s%c%c\"/>",
                    vec_lanes[j].suffix, name,
                    vec_lanes[j].sz, vec_lanes[j].suffix);
            }
        }
        gdb_feature_builder_append_tag(builder, "</union>");
    }

    /* And now the final union of unions */
    gdb_feature_builder_append_tag(builder, "<union id=\"%s\">", name);
    for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
        gdb_feature_builder_append_tag(builder,
                                       "<field name=\"%c\" type=\"%sn%c\"/>",
                                       suf[i], name, suf[i]);
    }
    gdb_feature_builder_append_tag(builder, "</union>");
}

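/*
 * Build the org.gnu.gdb.aarch64.sve feature at runtime, since the
 * register widths depend on the CPU's maximum vector length.  The
 * register numbering must stay in sync with aarch64_gdb_get_sve_reg()
 * and aarch64_gdb_set_sve_reg() above.
 */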
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int reg_width = cpu->sve_max_vq * 128;
    int pred_width = cpu->sve_max_vq * 16;
    GDBFeatureBuilder builder;
    char *name;
    int reg = 0;
    int i;

    gdb_feature_builder_init(&builder, &cpu->dyn_svereg_feature.desc,
                             "org.gnu.gdb.aarch64.sve", "sve-registers.xml",
                             base_reg);

    /* Create the vector union type. */
    output_vector_union_type(&builder, reg_width, "svev");

    /* Create the predicate vector type. */
    gdb_feature_builder_append_tag(
        &builder, "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
        pred_width / 8);

    /* Define the vector registers. */
    for (i = 0; i < 32; i++) {
        name = g_strdup_printf("z%d", i);
        gdb_feature_builder_append_reg(&builder, name, reg_width, reg++,
                                       "svev", NULL);
    }

    /* fpscr & status registers */
    gdb_feature_builder_append_reg(&builder, "fpsr", 32, reg++,
                                   "int", "float");
    gdb_feature_builder_append_reg(&builder, "fpcr", 32, reg++,
                                   "int", "float");

    /* Define the predicate registers. */
    for (i = 0; i < 16; i++) {
        name = g_strdup_printf("p%d", i);
        gdb_feature_builder_append_reg(&builder, name, pred_width, reg++,
                                       "svep", NULL);
    }
    gdb_feature_builder_append_reg(&builder, "ffr", pred_width, reg++,
                                   "svep", "vector");

    /* Define the vector length pseudo-register. */
    gdb_feature_builder_append_reg(&builder, "vg", 64, reg++, "int", NULL);

    gdb_feature_builder_end(&builder);

    return &cpu->dyn_svereg_feature.desc;
}

#ifdef CONFIG_USER_ONLY
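/*
 * The MTE 'tag_ctl' pseudo-register mirrors the Linux
 * prctl(PR_SET_TAGGED_ADDR_CTRL) tag-check-fault bits: reading returns
 * the current SCTLR_EL1.TCF0 field, writing shifts the value into place
 * and updates the EL0 tag-check-fault mode via arm_set_mte_tcf0().
 */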
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tcf0;

    assert(reg == 0);

    tcf0 = extract64(env->cp15.sctlr_el[1], 38, 2);

    return gdb_get_reg64(buf, tcf0);
}

int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg)
{
#if defined(CONFIG_LINUX)
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    uint8_t tcf;

    assert(reg == 0);

    tcf = *buf << PR_MTE_TCF_SHIFT;

    if (!tcf) {
        return 0;
    }

    /*
     * 'tag_ctl' register is actually a "pseudo-register" provided by GDB to
     * expose options regarding the type of MTE fault that can be controlled at
     * runtime.
     */
    arm_set_mte_tcf0(env, tcf);

    return 1;
#else
    return 0;
#endif
}
#endif /* CONFIG_USER_ONLY */

#ifdef CONFIG_TCG
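/*
 * Handle the 'qMemTags:<addr>,<len>:<type>' packet: read the MTE
 * allocation tag for a single address.  The reply is 'm' followed by the
 * tag as two hex digits, e.g. a granule tagged 0x3 is answered with "m03".
 */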
static void handle_q_memtag(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;
    uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
    int type = gdb_get_cmd_param(params, 2)->val_ul;

    uint8_t *tags;
    uint8_t addr_tag;

    g_autoptr(GString) str_buf = g_string_new(NULL);

    /*
     * GDB does not query multiple tags for a memory range on remote targets,
     * so multi-tag queries are not supported by the gdbstub either.
     */
    if (len != 1) {
        gdb_put_packet("E02");
        return;
    }

    /* GDB never queries a tag different from an allocation tag (type 1). */
    if (type != 1) {
        gdb_put_packet("E03");
        return;
    }

    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    /* Note that tags are packed here (2 tags packed in one byte). */
    tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
                                    MMU_DATA_LOAD, true, 0);
    if (!tags) {
        /* Address is not in a tagged region. */
        gdb_put_packet("E04");
        return;
    }

    /* Unpack tag from byte. */
    addr_tag = load_tag1(addr, tags);
    g_string_printf(str_buf, "m%.2x", addr_tag);

    gdb_put_packet(str_buf->str);
}

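/*
 * Handle the 'qIsAddressTagged:<addr>' packet: reply "01" if the address
 * lives in a region with MTE allocation tags, "00" otherwise.
 */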
static void handle_q_isaddresstagged(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull;

    uint8_t *tags;
    const char *reply;

    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1,
                                    MMU_DATA_LOAD, true, 0);
    reply = tags ? "01" : "00";

    gdb_put_packet(reply);
}

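/*
 * Handle the 'QMemTags:<addr>,<len>:<type>:<tags>' packet: write MTE
 * allocation tags over the range.  The hex-encoded tags are reused
 * cyclically if fewer tags than granules are supplied; the reply is "OK"
 * on success.
 */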
static void handle_Q_memtag(GArray *params, void *user_ctx)
{
    ARMCPU *cpu = ARM_CPU(user_ctx);
    CPUARMState *env = &cpu->env;
    uint32_t mmu_index;

    uint64_t start_addr = gdb_get_cmd_param(params, 0)->val_ull;
    uint64_t len = gdb_get_cmd_param(params, 1)->val_ul;
    int type = gdb_get_cmd_param(params, 2)->val_ul;
    char const *new_tags_str = gdb_get_cmd_param(params, 3)->data;

    uint64_t end_addr;

    int num_new_tags;
    uint8_t *tags;

    g_autoptr(GByteArray) new_tags = g_byte_array_new();

    /*
     * Only the allocation tag (i.e. type 1) can be set at the stub side.
     */
    if (type != 1) {
        gdb_put_packet("E02");
        return;
    }

    end_addr = start_addr + (len - 1); /* 'len' is always >= 1 */
    /* Check if request's memory range does not cross page boundaries. */
    if ((start_addr ^ end_addr) & TARGET_PAGE_MASK) {
        gdb_put_packet("E03");
        return;
    }

    /*
     * Get all tags in the page starting from the tag of the start address.
     * Note that there are two tags packed into a single byte here.
     */
    /* Find out the current translation regime for probe. */
    mmu_index = cpu_mmu_index(env_cpu(env), false);
    tags = allocation_tag_mem_probe(env, mmu_index, start_addr, MMU_DATA_STORE,
                                    1, MMU_DATA_STORE, true, 0);
    if (!tags) {
        /* Address is not in a tagged region. */
        gdb_put_packet("E04");
        return;
    }

    /* Convert tags provided by GDB, 2 hex digits per tag. */
    num_new_tags = strlen(new_tags_str) / 2;
    gdb_hextomem(new_tags, new_tags_str, num_new_tags);

    uint64_t address = start_addr;
    int new_tag_index = 0;
    while (address <= end_addr) {
        uint8_t new_tag;
        int packed_index;

        /*
         * Find packed tag index from unpacked tag index. There are two tags
         * in one packed index (one tag per nibble).
         */
        packed_index = new_tag_index / 2;

        new_tag = new_tags->data[new_tag_index % num_new_tags];
        store_tag1(address, tags + packed_index, new_tag);

        address += TAG_GRANULE;
        new_tag_index++;
    }

    gdb_put_packet("OK");
}

enum Command {
    qMemTags,
    qIsAddressTagged,
    QMemTags,
    NUM_CMDS
};

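/*
 * MTE packet handlers, registered in aarch64_cpu_register_gdb_commands()
 * below.  Reading the schema strings against the handlers above: "L" is
 * the 64-bit address (val_ull), "l" an unsigned-long field (val_ul), "s"
 * a string (data), with ',' and ':' as separators and a trailing '0'
 * marking the end of the packet -- e.g. "L,l:l0" parses
 * "<addr>,<len>:<type>".
 */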
static const GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = {
    [qMemTags] = {
        .handler = handle_q_memtag,
        .cmd_startswith = true,
        .cmd = "MemTags:",
        .schema = "L,l:l0",
        .need_cpu_context = true
    },
    [qIsAddressTagged] = {
        .handler = handle_q_isaddresstagged,
        .cmd_startswith = true,
        .cmd = "IsAddressTagged:",
        .schema = "L0",
        .need_cpu_context = true
    },
    [QMemTags] = {
        .handler = handle_Q_memtag,
        .cmd_startswith = true,
        .cmd = "MemTags:",
        .schema = "L,l:l:s0",
        .need_cpu_context = true
    },
};
#endif /* CONFIG_TCG */

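/*
 * Hook the AArch64-specific gdbstub commands into the generic stub.
 * When MTE is implemented, ";memory-tagging+" is advertised in the
 * qSupported reply and the handlers above are added to the 'q' (qtable)
 * and 'Q' (stable) packet dispatch tables.
 */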
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *qsupported,
                                        GPtrArray *qtable, GPtrArray *stable)
{
    /* MTE */
#ifdef CONFIG_TCG
    if (cpu_isar_feature(aa64_mte, cpu)) {
        g_string_append(qsupported, ";memory-tagging+");

        g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qMemTags]);
        g_ptr_array_add(qtable, (gpointer) &cmd_handler_table[qIsAddressTagged]);
        g_ptr_array_add(stable, (gpointer) &cmd_handler_table[QMemTags]);
    }
#endif
}