1 /*
2 * QEMU monitor
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "cpu.h"
27 #include "monitor/monitor.h"
28 #include "monitor/hmp-target.h"
29 #include "monitor/hmp.h"
30 #include "qobject/qdict.h"
31 #include "qapi/error.h"
32 #include "qapi/qapi-commands-misc.h"
33
34 /* Perform linear address sign extension */
/*
 * Sign-extend a linear address to canonical form.  With LA57 paging the
 * sign bit is bit 56 (57-bit addresses); otherwise it is bit 47 (48-bit
 * addresses).  On 32-bit targets the address is returned unchanged.
 */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    int sign_bit = (env->cr[4] & CR4_LA57_MASK) ? 56 : 47;

    if (addr & (1ULL << sign_bit)) {
        addr |= (hwaddr)-(1LL << (sign_bit + 1));
    }
#endif
    return addr;
}
50
/*
 * Print one page-table entry: canonical virtual address, the physical
 * address selected by @mask, and a fixed-order attribute string
 * (X G P D A C T U W; '-' for a clear bit).
 */
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    char attrs[10];

    attrs[0] = pte & PG_NX_MASK       ? 'X' : '-';
    attrs[1] = pte & PG_GLOBAL_MASK   ? 'G' : '-';
    attrs[2] = pte & PG_PSE_MASK      ? 'P' : '-';
    attrs[3] = pte & PG_DIRTY_MASK    ? 'D' : '-';
    attrs[4] = pte & PG_ACCESSED_MASK ? 'A' : '-';
    attrs[5] = pte & PG_PCD_MASK      ? 'C' : '-';
    attrs[6] = pte & PG_PWT_MASK      ? 'T' : '-';
    attrs[7] = pte & PG_USER_MASK     ? 'U' : '-';
    attrs[8] = pte & PG_RW_MASK       ? 'W' : '-';
    attrs[9] = '\0';

    monitor_printf(mon, HWADDR_FMT_plx ": " HWADDR_FMT_plx " %s\n",
                   addr_canonical(env, addr), pte & mask, attrs);
}
70
/* Dump all present mappings for legacy 32-bit (non-PAE) paging. */
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int di, ti;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (di = 0; di < 1024; di++) {
        cpu_physical_memory_read(pgd + di * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (!(pde & PG_PRESENT_MASK)) {
            continue;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4M pages */
            print_pte(mon, env, (di << 22), pde, ~((1 << 21) - 1));
            continue;
        }
        for (ti = 0; ti < 1024; ti++) {
            cpu_physical_memory_read((pde & ~0xfff) + ti * 4, &pte, 4);
            pte = le32_to_cpu(pte);
            if (pte & PG_PRESENT_MASK) {
                print_pte(mon, env, (di << 22) + (ti << 12),
                          pte & ~PG_PSE_MASK, ~0xfff);
            }
        }
    }
}
98
/* Dump all present mappings for 32-bit PAE paging (3-level walk). */
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int i1, i2, i3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    /* CR3 points at the 32-byte-aligned page-directory-pointer table. */
    pdp_addr = env->cr[3] & ~0x1f;
    for (i1 = 0; i1 < 4; i1++) {
        cpu_physical_memory_read(pdp_addr + i1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (!(pdpe & PG_PRESENT_MASK)) {
            continue;
        }
        pd_addr = pdpe & 0x3fffffffff000ULL;
        for (i2 = 0; i2 < 512; i2++) {
            cpu_physical_memory_read(pd_addr + i2 * 8, &pde, 8);
            pde = le64_to_cpu(pde);
            if (!(pde & PG_PRESENT_MASK)) {
                continue;
            }
            if (pde & PG_PSE_MASK) {
                /* 2M pages with PAE, CR4.PSE is ignored */
                print_pte(mon, env, (i1 << 30) + (i2 << 21), pde,
                          ~((hwaddr)(1 << 20) - 1));
                continue;
            }
            pt_addr = pde & 0x3fffffffff000ULL;
            for (i3 = 0; i3 < 512; i3++) {
                cpu_physical_memory_read(pt_addr + i3 * 8, &pte, 8);
                pte = le64_to_cpu(pte);
                if (pte & PG_PRESENT_MASK) {
                    print_pte(mon, env,
                              (i1 << 30) + (i2 << 21) + (i3 << 12),
                              pte & ~PG_PSE_MASK, ~(hwaddr)0xfff);
                }
            }
        }
    }
}
137
138 #ifdef TARGET_X86_64
/*
 * Dump all present mappings below one PML4 table (48-bit walk).
 * @l0 is the PML5 index when called from the LA57 walker, 0 otherwise;
 * it supplies bits 48..56 of the virtual address being reconstructed.
 */
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t i1, i2, i3, i4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (i1 = 0; i1 < 512; i1++) {
        cpu_physical_memory_read(pml4_addr + i1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (i2 = 0; i2 < 512; i2++) {
            cpu_physical_memory_read(pdp_addr + i2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (i1 << 39) + (i2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (i3 = 0; i3 < 512; i3++) {
                cpu_physical_memory_read(pd_addr + i3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }
                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env,
                              (l0 << 48) + (i1 << 39) + (i2 << 30) +
                              (i3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (i4 = 0; i4 < 512; i4++) {
                    cpu_physical_memory_read(pt_addr + i4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env,
                                  (l0 << 48) + (i1 << 39) + (i2 << 30) +
                                  (i3 << 21) + (i4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}
199
/* Dump all present mappings for 5-level (LA57) paging: walk the PML5
 * table and hand each present entry to the 48-bit walker. */
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t idx;
    uint64_t pml5e;
    uint64_t pml5_addr = env->cr[3] & 0x3fffffffff000ULL;

    for (idx = 0; idx < 512; idx++) {
        cpu_physical_memory_read(pml5_addr + idx * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (!(pml5e & PG_PRESENT_MASK)) {
            continue;
        }
        tlb_info_la48(mon, env, idx, pml5e & 0x3fffffffff000ULL);
    }
}
215 #endif /* TARGET_X86_64 */
216
/*
 * HMP "info tlb": dump the guest page tables of the current CPU,
 * dispatching on the active paging mode (32-bit, PAE, LA48, LA57).
 */
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env = mon_get_cpu_env(mon);

    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (!(env->cr[4] & CR4_PAE_MASK)) {
        tlb_info_32(mon, env);
        return;
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (env->cr[4] & CR4_LA57_MASK) {
            tlb_info_la57(mon, env);
        } else {
            tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
        }
        return;
    }
#endif
    tlb_info_pae32(mon, env);
}
248
/*
 * Range coalescer for "info mem": called once per page/superpage with the
 * end address and protection of the region just examined.  When the
 * protection changes, print the range that just closed and open a new one.
 * *pstart == -1 means no mapped range is currently open.
 */
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prev = *plast_prot;

    if (prot == prev) {
        return;
    }
    if (*pstart != -1) {
        monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
                       HWADDR_FMT_plx " %c%c%c\n",
                       addr_canonical(env, *pstart),
                       addr_canonical(env, end),
                       addr_canonical(env, end - *pstart),
                       prev & PG_USER_MASK ? 'u' : '-',
                       'r',
                       prev & PG_RW_MASK ? 'w' : '-');
    }
    /* Open a new range at @end if mapped, otherwise mark "no open range". */
    *pstart = prot ? end : (hwaddr)-1;
    *plast_prot = prot;
}
273
/* "info mem" walker for legacy 32-bit (non-PAE) paging. */
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int di, ti;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (di = 0; di < 1024; di++) {
        cpu_physical_memory_read(pgd + di * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = di << 22;
        if (!(pde & PG_PRESENT_MASK)) {
            mem_print(mon, env, &start, &last_prot, end, 0);
            continue;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4M page: protection comes straight from the directory entry */
            prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }
        for (ti = 0; ti < 1024; ti++) {
            cpu_physical_memory_read((pde & ~0xfff) + ti * 4, &pte, 4);
            pte = le32_to_cpu(pte);
            end = (di << 22) + (ti << 12);
            /* Effective protection is the AND of both levels. */
            prot = (pte & PG_PRESENT_MASK)
                   ? pte & pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK)
                   : 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
314
/* "info mem" walker for 32-bit PAE paging (3-level walk). */
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int i1, i2, i3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (i1 = 0; i1 < 4; i1++) {
        cpu_physical_memory_read(pdp_addr + i1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = i1 << 30;
        if (!(pdpe & PG_PRESENT_MASK)) {
            mem_print(mon, env, &start, &last_prot, end, 0);
            continue;
        }
        pd_addr = pdpe & 0x3fffffffff000ULL;
        for (i2 = 0; i2 < 512; i2++) {
            cpu_physical_memory_read(pd_addr + i2 * 8, &pde, 8);
            pde = le64_to_cpu(pde);
            end = (i1 << 30) + (i2 << 21);
            if (!(pde & PG_PRESENT_MASK)) {
                mem_print(mon, env, &start, &last_prot, end, 0);
                continue;
            }
            if (pde & PG_PSE_MASK) {
                /* 2M page: protection comes from the directory entry */
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }
            pt_addr = pde & 0x3fffffffff000ULL;
            for (i3 = 0; i3 < 512; i3++) {
                cpu_physical_memory_read(pt_addr + i3 * 8, &pte, 8);
                pte = le64_to_cpu(pte);
                end = (i1 << 30) + (i2 << 21) + (i3 << 12);
                /* Effective protection is the AND of both levels. */
                prot = (pte & PG_PRESENT_MASK)
                       ? pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK)
                       : 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
369
370
371 #ifdef TARGET_X86_64
/* "info mem" walker for 4-level (LA48) long-mode paging.  Effective
 * protection of a page is the AND of the U/W/P bits at every level. */
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t i1, i2, i3, i4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (i1 = 0; i1 < 512; i1++) {
        cpu_physical_memory_read(pml4_addr + i1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = i1 << 39;
        if (!(pml4e & PG_PRESENT_MASK)) {
            mem_print(mon, env, &start, &last_prot, end, 0);
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (i2 = 0; i2 < 512; i2++) {
            cpu_physical_memory_read(pdp_addr + i2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            end = (i1 << 39) + (i2 << 30);
            if (!(pdpe & PG_PRESENT_MASK)) {
                mem_print(mon, env, &start, &last_prot, end, 0);
                continue;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1G page */
                prot = pdpe & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                prot &= pml4e;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (i3 = 0; i3 < 512; i3++) {
                cpu_physical_memory_read(pd_addr + i3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (i1 << 39) + (i2 << 30) + (i3 << 21);
                if (!(pde & PG_PRESENT_MASK)) {
                    mem_print(mon, env, &start, &last_prot, end, 0);
                    continue;
                }
                if (pde & PG_PSE_MASK) {
                    /* 2M page */
                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                  PG_PRESENT_MASK);
                    prot &= pml4e & pdpe;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (i4 = 0; i4 < 512; i4++) {
                    cpu_physical_memory_read(pt_addr + i4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    end = (i1 << 39) + (i2 << 30) + (i3 << 21) + (i4 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml4e & pdpe & pde;
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
451
/*
 * "info mem" walker for 5-level (LA57) long-mode paging.  Effective
 * protection of a page is the AND of the U/W/P bits at every level.
 *
 * Bug fix: the PDPE and PDE present tests were inverted
 * ("if (pdpe & PG_PRESENT_MASK)" treated *present* entries as holes and
 * walked non-present ones), unlike the PML5E/PML4E guards above them and
 * the equivalent checks in mem_info_la48().  Both now test
 * !(entry & PG_PRESENT_MASK).
 */
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                /* FIX: was "if (pdpe & PG_PRESENT_MASK)" (inverted). */
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    /* 1G page */
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    /* FIX: was "if (pde & PG_PRESENT_MASK)" (inverted). */
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        /* 2M page */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
543 #endif /* TARGET_X86_64 */
544
/*
 * HMP "info mem": print coalesced virtual-memory ranges with their
 * protections, dispatching on the active paging mode.
 */
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env = mon_get_cpu_env(mon);

    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (!(env->cr[4] & CR4_PAE_MASK)) {
        mem_info_32(mon, env);
        return;
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (env->cr[4] & CR4_LA57_MASK) {
            mem_info_la57(mon, env);
        } else {
            mem_info_la48(mon, env);
        }
        return;
    }
#endif
    mem_info_pae32(mon, env);
}
576
/* HMP "mce": inject a machine-check exception into the selected vCPU. */
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (!cs) {
        /* Unknown CPU index: silently ignored, as before. */
        return;
    }
    cpu_x86_inject_mce(mon, X86_CPU(cs), bank, status, mcg_status,
                       addr, misc, flags);
}
599
monitor_get_pc(Monitor * mon,const struct MonitorDef * md,int val)600 static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
601 int val)
602 {
603 CPUArchState *env = mon_get_cpu_env(mon);
604 return env->eip + env->segs[R_CS].base;
605 }
606
/*
 * Register table used by HMP expression evaluation ($eax, $pc, ...).
 * Each entry maps a name to an offset inside CPUX86State; an optional
 * getter function overrides direct field access.  NULL name terminates.
 */
const MonitorDef monitor_defs[] = {
/* Expand to three entries per segment register: selector, base, limit. */
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    /* "pc" is computed (CS base + EIP) rather than read from a field. */
    { "pc", 0, monitor_get_pc, },
    { NULL },
};
642
/* Return the x86 register table for generic HMP expression evaluation. */
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}
647