/*
 * QEMU Plugin API
 *
 * This provides the API that is available to the plugins to interact
 * with QEMU. We have to be careful not to expose internal details of
 * how QEMU works so we abstract out things like translation and
 * instructions to anonymous data types:
 *
 *  qemu_plugin_tb
 *  qemu_plugin_insn
 *  qemu_plugin_register
 *
 * These can then be passed back into the API to do additional things.
 * As such all the public functions in here are exported in
 * qemu-plugin.h.
 *
 * The general life-cycle of a plugin is:
 *
 *  - plugin is loaded, public qemu_plugin_install called
 *   - the install func registers callbacks for events
 *   - usually an atexit_cb is registered to dump info at the end
 *  - when a registered event occurs the plugin is called
 *   - some events pass additional info
 *   - during translation the plugin can decide to instrument any
 *     instruction
 *  - when QEMU exits all the registered atexit callbacks are called
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "tcg/tcg.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "exec/translator.h"
#include "disas/disas.h"
#include "plugin.h"
#ifndef CONFIG_USER_ONLY
#include "qapi/error.h"
#include "migration/blocker.h"
#include "exec/ram_addr.h"
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#else
#include "qemu.h"
#ifdef CONFIG_LINUX
#include "loader.h"
#endif
#endif

/* Uninstall and Reset handlers */

void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, false);
}

void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, true);
}
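
/*
 * Usage sketch (plugin side, not part of this file): the life-cycle
 * described in the header comment looks roughly like this for a
 * plugin built against <qemu-plugin.h>. The plugin_exit name is
 * illustrative.
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
 *
 *   static void plugin_exit(qemu_plugin_id_t id, void *userdata)
 *   {
 *       qemu_plugin_outs("plugin done\n");
 *   }
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                              const qemu_info_t *info,
 *                                              int argc, char **argv)
 *   {
 *       qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
 *       return 0;
 *   }
 */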

/*
 * Plugin Register Functions
 *
 * This allows the plugin to register callbacks for various events
 * during the translation.
 */

void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_INIT, cb);
}

void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb);
}

static bool tb_is_mem_only(void)
{
    return tb_cflags(tcg_ctx->gen_tb) & CF_MEMI_ONLY;
}

void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
                                          qemu_plugin_vcpu_udata_cb_t cb,
                                          enum qemu_plugin_cb_flags flags,
                                          void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_tb_exec_cond_cb(struct qemu_plugin_tb *tb,
                                               qemu_plugin_vcpu_udata_cb_t cb,
                                               enum qemu_plugin_cb_flags flags,
                                               enum qemu_plugin_cond cond,
                                               qemu_plugin_u64 entry,
                                               uint64_t imm,
                                               void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_tb_exec_cb(tb, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&tb->cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
    struct qemu_plugin_tb *tb,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
                                            qemu_plugin_vcpu_udata_cb_t cb,
                                            enum qemu_plugin_cb_flags flags,
                                            void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cond_cb(
    struct qemu_plugin_insn *insn,
    qemu_plugin_vcpu_udata_cb_t cb,
    enum qemu_plugin_cb_flags flags,
    enum qemu_plugin_cond cond,
    qemu_plugin_u64 entry,
    uint64_t imm,
    void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_insn_exec_cb(insn, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&insn->insn_cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
    }
}
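
/*
 * Usage sketch (plugin side): a tb_trans callback can walk the block
 * and attach execution hooks to each instruction, for example an
 * inline per-vCPU counter. The insn_count variable is an illustrative
 * qemu_plugin_u64 set up at install time from a scoreboard.
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
 *               insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
 *       }
 *   }
 */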

/*
 * We always plant memory instrumentation because it doesn't finalise
 * until after the operation has completed.
 */
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                      qemu_plugin_vcpu_mem_cb_t cb,
                                      enum qemu_plugin_cb_flags flags,
                                      enum qemu_plugin_mem_rw rw,
                                      void *udata)
{
    plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
}

void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_mem_rw rw,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
}

void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
                                           qemu_plugin_vcpu_tb_trans_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_TB_TRANS, cb);
}

void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
                                          qemu_plugin_vcpu_syscall_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL, cb);
}

void
qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_syscall_ret_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, cb);
}

/*
 * Plugin Queries
 *
 * These are queries that the plugin can make to extract information
 * from our opaque data types. We do not want to leak internal details
 * here, just information useful to the plugin.
 */

/*
 * Translation block information:
 *
 * A plugin can query the virtual address of the start of the block
 * and the number of instructions in it. It can also get access to
 * each translated instruction.
 */

size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb)
{
    return tb->n;
}

uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    return db->pc_first;
}

struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
{
    struct qemu_plugin_insn *insn;
    if (unlikely(idx >= tb->n)) {
        return NULL;
    }
    insn = g_ptr_array_index(tb->insns, idx);
    return insn;
}

/*
 * Instruction information
 *
 * These queries allow the plugin to retrieve information about each
 * instruction being translated.
 */

size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
                             void *dest, size_t len)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;

    len = MIN(len, insn->len);
    return translator_st(db, dest, insn->vaddr, len) ? len : 0;
}

size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
{
    return insn->len;
}

uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
{
    return insn->vaddr;
}
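
/*
 * Usage sketch (plugin side): during translation the encoding and
 * disassembly of an instruction can be queried through its handle.
 * Here insn is assumed to come from qemu_plugin_tb_get_insn(); the
 * data query clamps the copy to the real instruction length.
 *
 *   uint64_t vaddr = qemu_plugin_insn_vaddr(insn);
 *   size_t size = qemu_plugin_insn_size(insn);
 *   uint8_t bytes[16];
 *   size_t copied = qemu_plugin_insn_data(insn, bytes, sizeof(bytes));
 *   char *disas = qemu_plugin_insn_disas(insn);
 *   ...
 *   g_free(disas);
 */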

void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    vaddr page0_last = db->pc_first | ~qemu_target_page_mask();

    if (db->fake_insn) {
        return NULL;
    }

    /*
     * ??? The return value is not intended for use of host memory,
     * but as a proxy for address space and physical address.
     * Thus we are only interested in the first byte and do not
     * care about spanning pages.
     */
    if (insn->vaddr <= page0_last) {
        if (db->host_addr[0] == NULL) {
            return NULL;
        }
        return db->host_addr[0] + insn->vaddr - db->pc_first;
    } else {
        if (db->host_addr[1] == NULL) {
            return NULL;
        }
        return db->host_addr[1] + insn->vaddr - (page0_last + 1);
    }
}

char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
{
    return plugin_disas(tcg_ctx->cpu, tcg_ctx->plugin_db,
                        insn->vaddr, insn->len);
}

const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
{
    const char *sym = lookup_symbol(insn->vaddr);
    return sym[0] != 0 ? sym : NULL;
}

/*
 * The memory queries allow the plugin to query information about a
 * memory access.
 */

unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIZE;
}

bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIGN;
}

bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return (op & MO_BSWAP) == MO_BE;
}

bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
{
    return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
}

qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)
{
    uint64_t low = current_cpu->neg.plugin_mem_value_low;
    qemu_plugin_mem_value value;

    switch (qemu_plugin_mem_size_shift(info)) {
    case 0:
        value.type = QEMU_PLUGIN_MEM_VALUE_U8;
        value.data.u8 = (uint8_t)low;
        break;
    case 1:
        value.type = QEMU_PLUGIN_MEM_VALUE_U16;
        value.data.u16 = (uint16_t)low;
        break;
    case 2:
        value.type = QEMU_PLUGIN_MEM_VALUE_U32;
        value.data.u32 = (uint32_t)low;
        break;
    case 3:
        value.type = QEMU_PLUGIN_MEM_VALUE_U64;
        value.data.u64 = low;
        break;
    case 4:
        value.type = QEMU_PLUGIN_MEM_VALUE_U128;
        value.data.u128.low = low;
        value.data.u128.high = current_cpu->neg.plugin_mem_value_high;
        break;
    default:
        g_assert_not_reached();
    }
    return value;
}

/*
 * Virtual Memory queries
 */

#ifdef CONFIG_SOFTMMU
static __thread struct qemu_plugin_hwaddr hwaddr_info;
#endif

struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
                                                  uint64_t vaddr)
{
#ifdef CONFIG_SOFTMMU
    CPUState *cpu = current_cpu;
    unsigned int mmu_idx = get_mmuidx(info);
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
    hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;

    assert(mmu_idx < NB_MMU_MODES);

    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
                           hwaddr_info.is_store, &hwaddr_info)) {
        error_report("invalid use of qemu_plugin_get_hwaddr");
        return NULL;
    }

    return &hwaddr_info;
#else
    return NULL;
#endif
}

bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    return haddr->is_io;
#else
    return false;
#endif
}

uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    if (haddr) {
        return haddr->phys_addr;
    }
#endif
    return 0;
}
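
/*
 * Usage sketch (plugin side): a memory callback can resolve the
 * access to a physical address and check whether it hit a device.
 * The vcpu_mem name is illustrative; it would be registered with
 * qemu_plugin_register_vcpu_mem_cb().
 *
 *   static void vcpu_mem(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
 *                        uint64_t vaddr, void *udata)
 *   {
 *       struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
 *
 *       if (hw && qemu_plugin_hwaddr_is_io(hw)) {
 *           uint64_t pa = qemu_plugin_hwaddr_phys_addr(hw);
 *           ...
 *       }
 *   }
 */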

const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
    if (h && h->is_io) {
        MemoryRegion *mr = h->mr;
        if (!mr->name) {
            unsigned maddr = (uintptr_t)mr;
            g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
            return g_intern_string(temp);
        } else {
            return g_intern_string(mr->name);
        }
    } else {
        return g_intern_static_string("RAM");
    }
#else
    return g_intern_static_string("Invalid");
#endif
}

int qemu_plugin_num_vcpus(void)
{
    return plugin_num_vcpus();
}

/*
 * Plugin output
 */
void qemu_plugin_outs(const char *string)
{
    qemu_log_mask(CPU_LOG_PLUGIN, "%s", string);
}

bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret)
{
    return name && value && qapi_bool_parse(name, value, ret, NULL);
}

/*
 * Binary path, start and end locations
 */
const char *qemu_plugin_path_to_binary(void)
{
    char *path = NULL;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    path = g_strdup(ts->bprm->filename);
#endif
    return path;
}

uint64_t qemu_plugin_start_code(void)
{
    uint64_t start = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    start = ts->info->start_code;
#endif
    return start;
}

uint64_t qemu_plugin_end_code(void)
{
    uint64_t end = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    end = ts->info->end_code;
#endif
    return end;
}

uint64_t qemu_plugin_entry_code(void)
{
    uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    entry = ts->info->entry;
#endif
    return entry;
}
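
/*
 * Usage sketch (plugin side, user-mode only): the code-range queries
 * can be used to restrict instrumentation to the main binary, e.g.
 * when deciding at translation time whether to instrument an
 * instruction.
 *
 *   uint64_t start = qemu_plugin_start_code();
 *   uint64_t end = qemu_plugin_end_code();
 *   uint64_t pc = qemu_plugin_insn_vaddr(insn);
 *
 *   if (pc >= start && pc < end) {
 *       ... instrument only code from the main binary ...
 *   }
 */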

/*
 * Create register handles.
 *
 * We need to create a handle for each register so the plugin
 * infrastructure can call gdbstub to read a register. They are
 * currently just a pointer encapsulation of the gdb_reg but in
 * future may hold internal plugin state so it's important plugin
 * authors are not tempted to treat them as numbers.
 *
 * We also construct a result array with those handles and some
 * ancillary data the plugin might find useful.
 */

static GArray *create_register_handles(GArray *gdbstub_regs)
{
    GArray *find_data = g_array_new(true, true,
                                    sizeof(qemu_plugin_reg_descriptor));

    for (int i = 0; i < gdbstub_regs->len; i++) {
        GDBRegDesc *grd = &g_array_index(gdbstub_regs, GDBRegDesc, i);
        qemu_plugin_reg_descriptor desc;

        /* skip "un-named" regs */
        if (!grd->name) {
            continue;
        }

        /* Create a record for the plugin */
        desc.handle = GINT_TO_POINTER(grd->gdb_reg + 1);
        desc.name = g_intern_string(grd->name);
        desc.feature = g_intern_string(grd->feature_name);
        g_array_append_val(find_data, desc);
    }

    return find_data;
}

GArray *qemu_plugin_get_registers(void)
{
    g_assert(current_cpu);

    g_autoptr(GArray) regs = gdb_get_register_list(current_cpu);
    return create_register_handles(regs);
}

bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
{
    g_assert(current_cpu);

    if (len == 0) {
        return false;
    }

    g_byte_array_set_size(data, len);

    int result = cpu_memory_rw_debug(current_cpu, addr, data->data,
                                     data->len, false);

    if (result < 0) {
        return false;
    }

    return true;
}

int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
    g_assert(current_cpu);

    return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1);
}

struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
    return plugin_scoreboard_new(element_size);
}

void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    plugin_scoreboard_free(score);
}

void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
                                  unsigned int vcpu_index)
{
    g_assert(vcpu_index < qemu_plugin_num_vcpus());
    /* we can't use g_array_index since entry size is not statically known */
    char *base_ptr = score->data->data;
    return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}

static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
                                    unsigned int vcpu_index)
{
    char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index);
    return (uint64_t *)(ptr + entry.offset);
}

void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t added)
{
    *plugin_u64_address(entry, vcpu_index) += added;
}

uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
                             unsigned int vcpu_index)
{
    return *plugin_u64_address(entry, vcpu_index);
}

void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t val)
{
    *plugin_u64_address(entry, vcpu_index) = val;
}

uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
{
    uint64_t total = 0;
    for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) {
        total += qemu_plugin_u64_get(entry, i);
    }
    return total;
}
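
/*
 * Usage sketch (plugin side): a scoreboard provides one slot per
 * vCPU and a qemu_plugin_u64 names a 64-bit field within it. The
 * counts and insn_count variables are illustrative.
 *
 *   static struct qemu_plugin_scoreboard *counts;
 *   static qemu_plugin_u64 insn_count;
 *
 *   counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *   insn_count = qemu_plugin_scoreboard_u64(counts);
 *   ...
 *   uint64_t total = qemu_plugin_u64_sum(insn_count);
 *   qemu_plugin_scoreboard_free(counts);
 */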

/*
 * Time control
 */
static bool has_control;
#ifdef CONFIG_SOFTMMU
static Error *migration_blocker;
#endif

const void *qemu_plugin_request_time_control(void)
{
    if (!has_control) {
        has_control = true;
#ifdef CONFIG_SOFTMMU
        error_setg(&migration_blocker,
                   "TCG plugin time control does not support migration");
        migrate_add_blocker(&migration_blocker, NULL);
#endif
        return &has_control;
    }
    return NULL;
}

#ifdef CONFIG_SOFTMMU
static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
{
    int64_t new_time = data.host_ulong;
    qemu_clock_advance_virtual_time(new_time);
}
#endif

void qemu_plugin_update_ns(const void *handle, int64_t new_time)
{
#ifdef CONFIG_SOFTMMU
    if (handle == &has_control) {
        /* Need to execute out of cpu_exec, so bql can be locked. */
        async_run_on_cpu(current_cpu,
                         advance_virtual_time__async,
                         RUN_ON_CPU_HOST_ULONG(new_time));
    }
#endif
}
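
/*
 * Usage sketch (plugin side): only one plugin can own time control.
 * The handle returned at install time is later passed back to move
 * the virtual clock forward, e.g. from an execution callback. The
 * time_handle and new_time_ns names are illustrative.
 *
 *   static const void *time_handle;
 *
 *   time_handle = qemu_plugin_request_time_control();
 *   ...
 *   if (time_handle) {
 *       qemu_plugin_update_ns(time_handle, new_time_ns);
 *   }
 */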