/*
 * QEMU Plugin API
 *
 * This provides the API that is available to the plugins to interact
 * with QEMU. We have to be careful not to expose internal details of
 * how QEMU works so we abstract out things like translation and
 * instructions to anonymous data types:
 *
 *  qemu_plugin_tb
 *  qemu_plugin_insn
 *  qemu_plugin_register
 *
 * Which can then be passed back into the API to do additional things.
 * As such all the public functions in here are exported in
 * qemu-plugin.h.
 *
 * The general life-cycle of a plugin is:
 *
 *  - plugin is loaded, public qemu_plugin_install called
 *    - the install func registers callbacks for events
 *    - usually an atexit_cb is registered to dump info at the end
 *  - when a registered event occurs the plugin is called
 *    - some events pass additional info
 *    - during translation the plugin can decide to instrument any
 *      instruction
 *  - when QEMU exits all the registered atexit callbacks are called
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "tcg/tcg.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "exec/translation-block.h"
#include "exec/translator.h"
#include "disas/disas.h"
#include "plugin.h"
#ifndef CONFIG_USER_ONLY
#include "qapi/error.h"
#include "migration/blocker.h"
#include "exec/ram_addr.h"
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#else
#include "qemu.h"
#ifdef CONFIG_LINUX
#include "loader.h"
#endif
#endif

/* Uninstall and Reset handlers */

void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, false);
}

void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, true);
}

/*
 * Plugin Register Functions
 *
 * This allows the plugin to register callbacks for various events
 * during the translation.
 */
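
/*
 * Illustrative sketch (not compiled here): a minimal plugin that uses
 * these registration hooks, built as usual against <qemu-plugin.h>.
 * The callback names (vcpu_init, vcpu_tb_trans, plugin_exit) are
 * hypothetical plugin-side symbols.
 *
 *   #include <qemu-plugin.h>
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
 *
 *   static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       // per-vcpu setup
 *   }
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       // decide what to instrument (see the translation sketches below)
 *   }
 *
 *   static void plugin_exit(qemu_plugin_id_t id, void *p)
 *   {
 *       qemu_plugin_outs("done\n");
 *   }
 *
 *   QEMU_PLUGIN_EXPORT
 *   int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                           int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
 *       qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
 *       qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
 *       return 0;
 *   }
 */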

void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_INIT, cb);
}

void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb);
}

static bool tb_is_mem_only(void)
{
    return tb_cflags(tcg_ctx->gen_tb) & CF_MEMI_ONLY;
}

void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
                                          qemu_plugin_vcpu_udata_cb_t cb,
                                          enum qemu_plugin_cb_flags flags,
                                          void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_tb_exec_cond_cb(struct qemu_plugin_tb *tb,
                                               qemu_plugin_vcpu_udata_cb_t cb,
                                               enum qemu_plugin_cb_flags flags,
                                               enum qemu_plugin_cond cond,
                                               qemu_plugin_u64 entry,
                                               uint64_t imm,
                                               void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_tb_exec_cb(tb, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&tb->cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
    struct qemu_plugin_tb *tb,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
                                            qemu_plugin_vcpu_udata_cb_t cb,
                                            enum qemu_plugin_cb_flags flags,
                                            void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
    }
}

void qemu_plugin_register_vcpu_insn_exec_cond_cb(
    struct qemu_plugin_insn *insn,
    qemu_plugin_vcpu_udata_cb_t cb,
    enum qemu_plugin_cb_flags flags,
    enum qemu_plugin_cond cond,
    qemu_plugin_u64 entry,
    uint64_t imm,
    void *udata)
{
    if (cond == QEMU_PLUGIN_COND_NEVER || tb_is_mem_only()) {
        return;
    }
    if (cond == QEMU_PLUGIN_COND_ALWAYS) {
        qemu_plugin_register_vcpu_insn_exec_cb(insn, cb, flags, udata);
        return;
    }
    plugin_register_dyn_cond_cb__udata(&insn->insn_cbs, cb, flags,
                                       cond, entry, imm, udata);
}

void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
    }
}
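
/*
 * Illustrative sketch (not compiled here) of how a plugin's tb_trans
 * callback typically drives the registration functions above. The
 * scoreboard, the insn_count entry and hotblock_hit are hypothetical
 * plugin-side names; qemu_plugin_scoreboard_u64(), QEMU_PLUGIN_COND_GE
 * and QEMU_PLUGIN_INLINE_ADD_U64 are assumed from <qemu-plugin.h>.
 *
 *   static struct qemu_plugin_scoreboard *counts;
 *   static qemu_plugin_u64 insn_count;
 *
 *   // in qemu_plugin_install():
 *   //   counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
 *   //   insn_count = qemu_plugin_scoreboard_u64(counts);
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *           // cheap per-vcpu counter, no helper call at execution time
 *           qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
 *               insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1);
 *       }
 *
 *       // only call back into the plugin once the per-vcpu count is large
 *       qemu_plugin_register_vcpu_tb_exec_cond_cb(
 *           tb, hotblock_hit, QEMU_PLUGIN_CB_NO_REGS,
 *           QEMU_PLUGIN_COND_GE, insn_count, 1000000, NULL);
 *   }
 */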

/*
 * We always plant memory instrumentation because the callbacks don't
 * finalise until after the operation has completed.
 */
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                      qemu_plugin_vcpu_mem_cb_t cb,
                                      enum qemu_plugin_cb_flags flags,
                                      enum qemu_plugin_mem_rw rw,
                                      void *udata)
{
    plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
}

void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_mem_rw rw,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
}

void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
                                           qemu_plugin_vcpu_tb_trans_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_TB_TRANS, cb);
}

void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
                                          qemu_plugin_vcpu_syscall_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL, cb);
}

void
qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_syscall_ret_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, cb);
}

/*
 * Plugin Queries
 *
 * These are queries that the plugin can make to gauge information
 * from our opaque data types. We do not want to leak internal details
 * here, just information useful to the plugin.
 */

/*
 * Translation block information:
 *
 * A plugin can query the virtual address of the start of the block
 * and the number of instructions in it. It can also get access to
 * each translated instruction.
 */

size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb)
{
    return tb->n;
}

uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    return db->pc_first;
}

struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
{
    struct qemu_plugin_insn *insn;
    if (unlikely(idx >= tb->n)) {
        return NULL;
    }
    insn = g_ptr_array_index(tb->insns, idx);
    return insn;
}

/*
 * Instruction information
 *
 * These queries allow the plugin to retrieve information about each
 * instruction being translated.
 */

size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
                             void *dest, size_t len)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;

    len = MIN(len, insn->len);
    return translator_st(db, dest, insn->vaddr, len) ? len : 0;
}

size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
{
    return insn->len;
}

uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
{
    return insn->vaddr;
}

void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    vaddr page0_last = db->pc_first | ~TARGET_PAGE_MASK;

    if (db->fake_insn) {
        return NULL;
    }

    /*
     * ??? The return value is not intended for use of host memory,
     * but as a proxy for address space and physical address.
     * Thus we are only interested in the first byte and do not
     * care about spanning pages.
     */
    if (insn->vaddr <= page0_last) {
        if (db->host_addr[0] == NULL) {
            return NULL;
        }
        return db->host_addr[0] + insn->vaddr - db->pc_first;
    } else {
        if (db->host_addr[1] == NULL) {
            return NULL;
        }
        return db->host_addr[1] + insn->vaddr - (page0_last + 1);
    }
}

char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
{
    return plugin_disas(tcg_ctx->cpu, tcg_ctx->plugin_db,
                        insn->vaddr, insn->len);
}

const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
{
    const char *sym = lookup_symbol(insn->vaddr);
    return sym[0] != 0 ? sym : NULL;
}
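
/*
 * Illustrative sketch (not compiled here): using the instruction
 * queries above from a tb_trans callback to inspect what is being
 * translated. vcpu_tb_trans is a hypothetical plugin-side name.
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           g_autofree char *disas = qemu_plugin_insn_disas(insn);
 *           const char *sym = qemu_plugin_insn_symbol(insn);
 *           uint32_t bytes = 0;
 *
 *           // copy at most the first four bytes of the encoding
 *           qemu_plugin_insn_data(insn, &bytes, sizeof(bytes));
 *
 *           g_autofree char *report =
 *               g_strdup_printf("0x%" PRIx64 " [%08x] %s%s%s\n",
 *                               qemu_plugin_insn_vaddr(insn), bytes,
 *                               disas, sym ? " @ " : "", sym ? sym : "");
 *           qemu_plugin_outs(report);
 *       }
 *   }
 */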

/*
 * The memory queries allow the plugin to query information about a
 * memory access.
 */

unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIZE;
}

bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIGN;
}

bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return (op & MO_BSWAP) == MO_BE;
}

bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
{
    return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
}

qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)
{
    uint64_t low = current_cpu->neg.plugin_mem_value_low;
    qemu_plugin_mem_value value;

    switch (qemu_plugin_mem_size_shift(info)) {
    case 0:
        value.type = QEMU_PLUGIN_MEM_VALUE_U8;
        value.data.u8 = (uint8_t)low;
        break;
    case 1:
        value.type = QEMU_PLUGIN_MEM_VALUE_U16;
        value.data.u16 = (uint16_t)low;
        break;
    case 2:
        value.type = QEMU_PLUGIN_MEM_VALUE_U32;
        value.data.u32 = (uint32_t)low;
        break;
    case 3:
        value.type = QEMU_PLUGIN_MEM_VALUE_U64;
        value.data.u64 = low;
        break;
    case 4:
        value.type = QEMU_PLUGIN_MEM_VALUE_U128;
        value.data.u128.low = low;
        value.data.u128.high = current_cpu->neg.plugin_mem_value_high;
        break;
    default:
        g_assert_not_reached();
    }
    return value;
}

/*
 * Virtual Memory queries
 */

#ifdef CONFIG_SOFTMMU
static __thread struct qemu_plugin_hwaddr hwaddr_info;
#endif

struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
                                                  uint64_t vaddr)
{
#ifdef CONFIG_SOFTMMU
    CPUState *cpu = current_cpu;
    unsigned int mmu_idx = get_mmuidx(info);
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
    hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;

    assert(mmu_idx < NB_MMU_MODES);

    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
                           hwaddr_info.is_store, &hwaddr_info)) {
        error_report("invalid use of qemu_plugin_get_hwaddr");
        return NULL;
    }

    return &hwaddr_info;
#else
    return NULL;
#endif
}

bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    return haddr->is_io;
#else
    return false;
#endif
}

uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    if (haddr) {
        return haddr->phys_addr;
    }
#endif
    return 0;
}
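
/*
 * Illustrative sketch (not compiled here): a memory callback, as
 * registered with qemu_plugin_register_vcpu_mem_cb(), combining the
 * access and address queries. mem_cb is a hypothetical plugin-side
 * name; qemu_plugin_get_hwaddr() returns NULL in user-mode.
 *
 *   static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
 *                      uint64_t vaddr, void *udata)
 *   {
 *       unsigned size = 1 << qemu_plugin_mem_size_shift(info);
 *       const char *kind = qemu_plugin_mem_is_store(info) ? "store" : "load";
 *       struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
 *       g_autofree char *report =
 *           g_strdup_printf("%s of %u bytes at 0x%" PRIx64 " (%s)\n",
 *                           kind, size, vaddr,
 *                           hw ? qemu_plugin_hwaddr_device_name(hw) : "?");
 *
 *       qemu_plugin_outs(report);
 *   }
 */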

const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
    if (h && h->is_io) {
        MemoryRegion *mr = h->mr;
        if (!mr->name) {
            unsigned maddr = (uintptr_t)mr;
            g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
            return g_intern_string(temp);
        } else {
            return g_intern_string(mr->name);
        }
    } else {
        return g_intern_static_string("RAM");
    }
#else
    return g_intern_static_string("Invalid");
#endif
}

int qemu_plugin_num_vcpus(void)
{
    return plugin_num_vcpus();
}

/*
 * Plugin output
 */
void qemu_plugin_outs(const char *string)
{
    qemu_log_mask(CPU_LOG_PLUGIN, "%s", string);
}

bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret)
{
    return name && value && qapi_bool_parse(name, value, ret, NULL);
}

/*
 * Binary path, start and end locations
 */
const char *qemu_plugin_path_to_binary(void)
{
    char *path = NULL;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    path = g_strdup(ts->bprm->filename);
#endif
    return path;
}

uint64_t qemu_plugin_start_code(void)
{
    uint64_t start = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    start = ts->info->start_code;
#endif
    return start;
}

uint64_t qemu_plugin_end_code(void)
{
    uint64_t end = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    end = ts->info->end_code;
#endif
    return end;
}

uint64_t qemu_plugin_entry_code(void)
{
    uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    entry = ts->info->entry;
#endif
    return entry;
}
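
/*
 * Illustrative sketch (not compiled here): in user-mode a plugin can
 * use the queries above to restrict instrumentation to the main
 * binary. vcpu_tb_trans, start_code and end_code are hypothetical
 * plugin-side names; outside user-mode these queries return 0/NULL.
 *
 *   static uint64_t start_code, end_code;
 *
 *   // in qemu_plugin_install():
 *   //   start_code = qemu_plugin_start_code();
 *   //   end_code = qemu_plugin_end_code();
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       uint64_t pc = qemu_plugin_tb_vaddr(tb);
 *
 *       if (end_code && (pc < start_code || pc >= end_code)) {
 *           return; // not part of the main executable, skip it
 *       }
 *       // ... register callbacks as usual ...
 *   }
 */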

/*
 * Create register handles.
 *
 * We need to create a handle for each register so the plugin
 * infrastructure can call gdbstub to read a register. They are
 * currently just a pointer encapsulation of the gdb_reg but in
 * future may hold internal plugin state so it's important plugin
 * authors are not tempted to treat them as numbers.
 *
 * We also construct a result array with those handles and some
 * ancillary data the plugin might find useful.
 */

static GArray *create_register_handles(GArray *gdbstub_regs)
{
    GArray *find_data = g_array_new(true, true,
                                    sizeof(qemu_plugin_reg_descriptor));

    for (int i = 0; i < gdbstub_regs->len; i++) {
        GDBRegDesc *grd = &g_array_index(gdbstub_regs, GDBRegDesc, i);
        qemu_plugin_reg_descriptor desc;

        /* skip "un-named" regs */
        if (!grd->name) {
            continue;
        }

        /* Create a record for the plugin */
        desc.handle = GINT_TO_POINTER(grd->gdb_reg + 1);
        desc.name = g_intern_string(grd->name);
        desc.feature = g_intern_string(grd->feature_name);
        g_array_append_val(find_data, desc);
    }

    return find_data;
}

GArray *qemu_plugin_get_registers(void)
{
    g_assert(current_cpu);

    g_autoptr(GArray) regs = gdb_get_register_list(current_cpu);
    return create_register_handles(regs);
}

bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
{
    g_assert(current_cpu);

    if (len == 0) {
        return false;
    }

    g_byte_array_set_size(data, len);

    int result = cpu_memory_rw_debug(current_cpu, addr, data->data,
                                     data->len, false);

    if (result < 0) {
        return false;
    }

    return true;
}

int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
    g_assert(current_cpu);

    return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1);
}

struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
    return plugin_scoreboard_new(element_size);
}

void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    plugin_scoreboard_free(score);
}

void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
                                  unsigned int vcpu_index)
{
    g_assert(vcpu_index < qemu_plugin_num_vcpus());
    /* we can't use g_array_index since entry size is not statically known */
    char *base_ptr = score->data->data;
    return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}

static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
                                    unsigned int vcpu_index)
{
    char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index);
    return (uint64_t *)(ptr + entry.offset);
}

void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t added)
{
    *plugin_u64_address(entry, vcpu_index) += added;
}

uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
                             unsigned int vcpu_index)
{
    return *plugin_u64_address(entry, vcpu_index);
}

void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t val)
{
    *plugin_u64_address(entry, vcpu_index) = val;
}

uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
{
    uint64_t total = 0;
    for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) {
        total += qemu_plugin_u64_get(entry, i);
    }
    return total;
}

/*
 * Time control
 */
static bool has_control;
#ifdef CONFIG_SOFTMMU
static Error *migration_blocker;
#endif

const void *qemu_plugin_request_time_control(void)
{
    if (!has_control) {
        has_control = true;
#ifdef CONFIG_SOFTMMU
        error_setg(&migration_blocker,
                   "TCG plugin time control does not support migration");
        migrate_add_blocker(&migration_blocker, NULL);
#endif
        return &has_control;
    }
    return NULL;
}
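
/*
 * Illustrative sketch (not compiled here): only one plugin may own
 * time control, so check the returned handle before driving the
 * clock. time_handle and new_time_ns are hypothetical plugin-side
 * names.
 *
 *   static const void *time_handle;
 *
 *   // in qemu_plugin_install():
 *   //   time_handle = qemu_plugin_request_time_control();
 *
 *   // later, e.g. from an instruction-exec callback:
 *   if (time_handle) {
 *       qemu_plugin_update_ns(time_handle, new_time_ns);
 *   }
 */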

#ifdef CONFIG_SOFTMMU
static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
{
    int64_t new_time = data.host_ulong;
    qemu_clock_advance_virtual_time(new_time);
}
#endif

void qemu_plugin_update_ns(const void *handle, int64_t new_time)
{
#ifdef CONFIG_SOFTMMU
    if (handle == &has_control) {
        /* Need to execute out of cpu_exec, so bql can be locked. */
        async_run_on_cpu(current_cpu,
                         advance_virtual_time__async,
                         RUN_ON_CPU_HOST_ULONG(new_time));
    }
#endif
}