/*
 * QEMU Plugin API
 *
 * This provides the API that is available to the plugins to interact
 * with QEMU. We have to be careful not to expose internal details of
 * how QEMU works so we abstract out things like translation and
 * instructions to anonymous data types:
 *
 *  qemu_plugin_tb
 *  qemu_plugin_insn
 *  qemu_plugin_register
 *
 * These can then be passed back into the API to do additional things.
 * As such, all the public functions here are exported in
 * qemu-plugin.h.
 *
 * The general life-cycle of a plugin is:
 *
 *  - plugin is loaded, public qemu_plugin_install called
 *    - the install func registers callbacks for events
 *    - usually an atexit_cb is registered to dump info at the end
 *  - when a registered event occurs the plugin is called
 *    - some events pass additional info
 *    - during translation the plugin can decide to instrument any
 *      instruction
 *  - when QEMU exits all the registered atexit callbacks are called
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "tcg/tcg.h"
#include "exec/gdbstub.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "exec/translator.h"
#include "disas/disas.h"
#include "plugin.h"
#ifndef CONFIG_USER_ONLY
#include "qapi/error.h"
#include "migration/blocker.h"
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#else
#include "qemu.h"
#ifdef CONFIG_LINUX
#include "loader.h"
#endif
#endif

/* Uninstall and Reset handlers */

void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, false);
}

void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, true);
}

/*
 * Plugin Register Functions
 *
 * These allow the plugin to register callbacks for various events
 * during translation.
 */
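/*
 * Example (illustrative sketch, not part of this file's code): a
 * minimal plugin entry point that registers callbacks and dumps its
 * results at exit. The helper names vcpu_init and plugin_exit are
 * hypothetical.
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
 *
 *   static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       qemu_plugin_outs("vcpu started\n");
 *   }
 *
 *   static void plugin_exit(qemu_plugin_id_t id, void *p)
 *   {
 *       qemu_plugin_outs("plugin done\n");
 *   }
 *
 *   QEMU_PLUGIN_EXPORT
 *   int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                           int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
 *       qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
 *       return 0;
 *   }
 */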
void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_INIT, cb);
}

void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb);
}

static bool tb_is_mem_only(void)
{
    return tb_cflags(tcg_ctx->gen_tb) & CF_MEMI_ONLY;
}

void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
                                          qemu_plugin_vcpu_udata_cb_t cb,
                                          enum qemu_plugin_cb_flags flags,
                                          void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_tb_exec_cond_cb(struct qemu_plugin_tb *tb,
                                               qemu_plugin_vcpu_udata_cb_t cb,
                                               enum qemu_plugin_cb_flags flags,
                                               enum qemu_plugin_cond cond,
                                               qemu_plugin_u64 entry,
                                               uint64_t imm,
                                               void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_tb_exec_cb(tb, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&tb->cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
    struct qemu_plugin_tb *tb,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
                                            qemu_plugin_vcpu_udata_cb_t cb,
                                            enum qemu_plugin_cb_flags flags,
                                            void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cond_cb(
    struct qemu_plugin_insn *insn,
    qemu_plugin_vcpu_udata_cb_t cb,
    enum qemu_plugin_cb_flags flags,
    enum qemu_plugin_cond cond,
    qemu_plugin_u64 entry,
    uint64_t imm,
    void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_insn_exec_cb(insn, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&insn->insn_cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
    }
}
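/*
 * Example (illustrative sketch): a translation-time callback that
 * plants an inline per-vCPU counter on every instruction of the
 * block. "insn_count" is a hypothetical qemu_plugin_u64 backed by a
 * scoreboard created in qemu_plugin_install(); the callback itself is
 * registered with qemu_plugin_register_vcpu_tb_trans_cb().
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *           qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
 *               insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
 *       }
 *   }
 */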
/*
 * We always plant memory instrumentation because the callbacks don't
 * finalise until after the operation has completed.
 */
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                      qemu_plugin_vcpu_mem_cb_t cb,
                                      enum qemu_plugin_cb_flags flags,
                                      enum qemu_plugin_mem_rw rw,
                                      void *udata)
{
    plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
}

void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_mem_rw rw,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
}

void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
                                           qemu_plugin_vcpu_tb_trans_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_TB_TRANS, cb);
}

void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
                                          qemu_plugin_vcpu_syscall_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL, cb);
}

void
qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_syscall_ret_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, cb);
}

/*
 * Plugin Queries
 *
 * These are queries the plugin can make to gather information from
 * our opaque data types. We do not want to leak internal details
 * here, just information useful to the plugin.
 */

/*
 * Translation block information:
 *
 * A plugin can query the virtual address of the start of the block
 * and the number of instructions in it. It can also get access to
 * each translated instruction.
 */

size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb)
{
    return tb->n;
}

uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    return db->pc_first;
}

struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
{
    struct qemu_plugin_insn *insn;
    if (unlikely(idx >= tb->n)) {
        return NULL;
    }
    insn = g_ptr_array_index(tb->insns, idx);
    return insn;
}

/*
 * Instruction information
 *
 * These queries allow the plugin to retrieve information about each
 * instruction being translated.
 */

size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
                             void *dest, size_t len)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;

    len = MIN(len, insn->len);
    return translator_st(db, dest, insn->vaddr, len) ? len : 0;
}

size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
{
    return insn->len;
}

uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
{
    return insn->vaddr;
}
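/*
 * Example (illustrative sketch): copying the opcode bytes of an
 * instruction while it is being translated, e.g. from inside a
 * tb_trans callback. The 16-byte buffer is an assumption large enough
 * for the longest instructions on currently supported guests.
 *
 *   uint8_t buf[16];
 *   size_t len = qemu_plugin_insn_size(insn);
 *
 *   if (qemu_plugin_insn_data(insn, buf, MIN(len, sizeof(buf)))) {
 *       ... decode or hash the bytes ...
 *   }
 */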
void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    vaddr page0_last = db->pc_first | ~qemu_target_page_mask();

    if (db->fake_insn) {
        return NULL;
    }

    /*
     * ??? The return value is not intended for use of host memory,
     * but as a proxy for address space and physical address.
     * Thus we are only interested in the first byte and do not
     * care about spanning pages.
     */
    if (insn->vaddr <= page0_last) {
        if (db->host_addr[0] == NULL) {
            return NULL;
        }
        return db->host_addr[0] + insn->vaddr - db->pc_first;
    } else {
        if (db->host_addr[1] == NULL) {
            return NULL;
        }
        return db->host_addr[1] + insn->vaddr - (page0_last + 1);
    }
}

char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
{
    return plugin_disas(tcg_ctx->cpu, tcg_ctx->plugin_db,
                        insn->vaddr, insn->len);
}

const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
{
    const char *sym = lookup_symbol(insn->vaddr);
    return sym[0] != 0 ? sym : NULL;
}

/*
 * The memory queries allow the plugin to query information about a
 * memory access.
 */

unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIZE;
}

bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIGN;
}

bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return (op & MO_BSWAP) == MO_BE;
}

bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
{
    return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
}

qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)
{
    uint64_t low = current_cpu->neg.plugin_mem_value_low;
    qemu_plugin_mem_value value;

    switch (qemu_plugin_mem_size_shift(info)) {
    case 0:
        value.type = QEMU_PLUGIN_MEM_VALUE_U8;
        value.data.u8 = (uint8_t)low;
        break;
    case 1:
        value.type = QEMU_PLUGIN_MEM_VALUE_U16;
        value.data.u16 = (uint16_t)low;
        break;
    case 2:
        value.type = QEMU_PLUGIN_MEM_VALUE_U32;
        value.data.u32 = (uint32_t)low;
        break;
    case 3:
        value.type = QEMU_PLUGIN_MEM_VALUE_U64;
        value.data.u64 = low;
        break;
    case 4:
        value.type = QEMU_PLUGIN_MEM_VALUE_U128;
        value.data.u128.low = low;
        value.data.u128.high = current_cpu->neg.plugin_mem_value_high;
        break;
    default:
        g_assert_not_reached();
    }
    return value;
}

/*
 * Virtual Memory queries
 */

#ifdef CONFIG_SOFTMMU
static __thread struct qemu_plugin_hwaddr hwaddr_info;
#endif

struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
                                                  uint64_t vaddr)
{
#ifdef CONFIG_SOFTMMU
    CPUState *cpu = current_cpu;
    unsigned int mmu_idx = get_mmuidx(info);
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
    hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;

    assert(mmu_idx < NB_MMU_MODES);

    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
                           hwaddr_info.is_store, &hwaddr_info)) {
        error_report("invalid use of qemu_plugin_get_hwaddr");
        return NULL;
    }

    return &hwaddr_info;
#else
    return NULL;
#endif
}

bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    return haddr->is_io;
#else
    return false;
#endif
}

uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    if (haddr) {
        return haddr->phys_addr;
    }
#endif
    return 0;
}

const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
    if (h && h->is_io) {
        MemoryRegion *mr = h->mr;
        if (!mr->name) {
            unsigned maddr = (uintptr_t)mr;
            g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
            return g_intern_string(temp);
        } else {
            return g_intern_string(mr->name);
        }
    } else {
        return g_intern_static_string("RAM");
    }
#else
    return g_intern_static_string("Invalid");
#endif
}
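/*
 * Example (illustrative sketch): a memory callback combining the
 * access queries with the hwaddr lookup. It would have been registered
 * with qemu_plugin_register_vcpu_mem_cb(); "mem_cb" is a hypothetical
 * name.
 *
 *   static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
 *                      uint64_t vaddr, void *udata)
 *   {
 *       unsigned size = 1 << qemu_plugin_mem_size_shift(info);
 *       struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
 *       g_autofree char *line = g_strdup_printf(
 *           "%s %u bytes @ 0x%" PRIx64 " (%s)\n",
 *           qemu_plugin_mem_is_store(info) ? "store" : "load",
 *           size, vaddr,
 *           hw ? qemu_plugin_hwaddr_device_name(hw) : "unknown");
 *
 *       qemu_plugin_outs(line);
 *   }
 */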
int qemu_plugin_num_vcpus(void)
{
    return plugin_num_vcpus();
}

/*
 * Plugin output
 */
void qemu_plugin_outs(const char *string)
{
    qemu_log_mask(CPU_LOG_PLUGIN, "%s", string);
}

bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret)
{
    return name && value && qapi_bool_parse(name, value, ret, NULL);
}

/*
 * Binary path, start and end locations
 */
const char *qemu_plugin_path_to_binary(void)
{
    char *path = NULL;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    path = g_strdup(ts->bprm->filename);
#endif
    return path;
}

uint64_t qemu_plugin_start_code(void)
{
    uint64_t start = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    start = ts->info->start_code;
#endif
    return start;
}

uint64_t qemu_plugin_end_code(void)
{
    uint64_t end = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    end = ts->info->end_code;
#endif
    return end;
}

uint64_t qemu_plugin_entry_code(void)
{
    uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    entry = ts->info->entry;
#endif
    return entry;
}
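/*
 * Example (illustrative sketch): a user-mode plugin that only
 * instruments instructions belonging to the main binary, caching the
 * range at install time. "code_start" and "code_end" are hypothetical
 * globals; in system-emulation mode both queries return 0.
 *
 *   static uint64_t code_start, code_end;
 *
 *   ... in qemu_plugin_install():
 *   code_start = qemu_plugin_start_code();
 *   code_end = qemu_plugin_end_code();
 *
 *   ... in the tb_trans callback, per instruction:
 *   uint64_t pc = qemu_plugin_insn_vaddr(insn);
 *   if (code_end && (pc < code_start || pc >= code_end)) {
 *       continue;
 *   }
 */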
/*
 * Create register handles.
 *
 * We need to create a handle for each register so the plugin
 * infrastructure can call gdbstub to read a register. They are
 * currently just a pointer encapsulation of the gdb_reg, but in
 * future they may hold internal plugin state, so it's important that
 * plugin authors are not tempted to treat them as numbers.
 *
 * We also construct a result array with those handles and some
 * ancillary data the plugin might find useful.
 */

static GArray *create_register_handles(GArray *gdbstub_regs)
{
    GArray *find_data = g_array_new(true, true,
                                    sizeof(qemu_plugin_reg_descriptor));

    for (int i = 0; i < gdbstub_regs->len; i++) {
        GDBRegDesc *grd = &g_array_index(gdbstub_regs, GDBRegDesc, i);
        qemu_plugin_reg_descriptor desc;

        /* skip "un-named" regs */
        if (!grd->name) {
            continue;
        }

        /* Create a record for the plugin */
        desc.handle = GINT_TO_POINTER(grd->gdb_reg + 1);
        desc.name = g_intern_string(grd->name);
        desc.feature = g_intern_string(grd->feature_name);
        g_array_append_val(find_data, desc);
    }

    return find_data;
}

GArray *qemu_plugin_get_registers(void)
{
    g_assert(current_cpu);

    g_autoptr(GArray) regs = gdb_get_register_list(current_cpu);
    return create_register_handles(regs);
}

bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
{
    g_assert(current_cpu);

    if (len == 0) {
        return false;
    }

    g_byte_array_set_size(data, len);

    int result = cpu_memory_rw_debug(current_cpu, addr, data->data,
                                     data->len, false);

    if (result < 0) {
        return false;
    }

    return true;
}

int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
    g_assert(current_cpu);

    return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1);
}

struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
    return plugin_scoreboard_new(element_size);
}

void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    plugin_scoreboard_free(score);
}

void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
                                  unsigned int vcpu_index)
{
    g_assert(vcpu_index < qemu_plugin_num_vcpus());
    /* we can't use g_array_index since entry size is not statically known */
    char *base_ptr = score->data->data;
    return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}

static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
                                    unsigned int vcpu_index)
{
    char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index);
    return (uint64_t *)(ptr + entry.offset);
}

void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t added)
{
    *plugin_u64_address(entry, vcpu_index) += added;
}

uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
                             unsigned int vcpu_index)
{
    return *plugin_u64_address(entry, vcpu_index);
}

void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t val)
{
    *plugin_u64_address(entry, vcpu_index) = val;
}

uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
{
    uint64_t total = 0;
    for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) {
        total += qemu_plugin_u64_get(entry, i);
    }
    return total;
}

/*
 * Time control
 */
static bool has_control;
#ifdef CONFIG_SOFTMMU
static Error *migration_blocker;
#endif

const void *qemu_plugin_request_time_control(void)
{
    if (!has_control) {
        has_control = true;
#ifdef CONFIG_SOFTMMU
        error_setg(&migration_blocker,
                   "TCG plugin time control does not support migration");
        migrate_add_blocker(&migration_blocker, NULL);
#endif
        return &has_control;
    }
    return NULL;
}
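/*
 * Example (illustrative sketch): a plugin taking over the virtual
 * clock. The handle returned by qemu_plugin_request_time_control() is
 * saved at install time and later passed to qemu_plugin_update_ns();
 * "time_handle" is a hypothetical global.
 *
 *   static const void *time_handle;
 *
 *   ... in qemu_plugin_install():
 *   time_handle = qemu_plugin_request_time_control();
 *   if (!time_handle) {
 *       return -1;   (another plugin already controls time)
 *   }
 *
 *   ... later, e.g. from a TB execution callback:
 *   qemu_plugin_update_ns(time_handle, new_time_ns);
 */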
#ifdef CONFIG_SOFTMMU
static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
{
    int64_t new_time = data.host_ulong;
    qemu_clock_advance_virtual_time(new_time);
}
#endif

void qemu_plugin_update_ns(const void *handle, int64_t new_time)
{
#ifdef CONFIG_SOFTMMU
    if (handle == &has_control) {
        /* Need to execute out of cpu_exec, so the BQL can be locked. */
        async_run_on_cpu(current_cpu,
                         advance_virtual_time__async,
                         RUN_ON_CPU_HOST_ULONG(new_time));
    }
#endif
}