/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. To support this, we first translate the TB, so
 * that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events; once we have collected the
 * instrumentation requests from plugins, we either generate code for each
 * marker or remove it if it has no requests.
 */
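
/*
 * For orientation, a minimal sketch of the plugin side that drives this
 * machinery (not part of this file), using the public qemu-plugin.h API.
 * "vcpu_insn_exec" and "udata" are placeholder names:
 *
 *     static void vcpu_tb_trans(qemu_plugin_id_t id,
 *                               struct qemu_plugin_tb *tb)
 *     {
 *         size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *         for (size_t i = 0; i < n; i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *             qemu_plugin_register_vcpu_insn_exec_cb(
 *                 insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, udata);
 *         }
 *     }
 *
 * Each such registration becomes one of the dynamic callbacks that
 * plugin_gen_inject() emits at the corresponding marker.
 */
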
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"

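/*
 * The points at which a "plugin_cb" marker opcode can be emitted: at the
 * start of the TB, at the start of an instruction, after an instruction,
 * and before the TB exits.
 */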
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they
     * can be freed later on, and (2) point CPUState.neg.plugin_mem_cbs
     * to the descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    g_array_append_vals(arr, insn->mem_cbs->data, len);
    qemu_plugin_add_dyn_cb_arr(arr);

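    /*
     * tcg_env points to the CPUArchState that directly follows CPUState,
     * so CPUState fields are reached with negative offsets from tcg_env;
     * hence the "- sizeof(CPUState)" below.
     */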
    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}

static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}

static TCGv_i32 gen_cpu_index(void)
{
    /*
     * Optimize when we run with a single vcpu: cpu_index is then a
     * compile-time constant, so all values derived from it (including
     * scoreboard indices) can be folded away. This is safe because
     * user-mode calls tb_flush when setting CF_PARALLEL, and in
     * system mode all vcpus are created before generating code.
     */
    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
        return tcg_constant_i32(current_cpu->cpu_index);
    }
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   offsetof(CPUState, cpu_index) - sizeof(CPUState));
    return cpu_index;
}

static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

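    /*
     * Compute the address of this vcpu's slot in the scoreboard:
     * score->data->data + entry.offset + cpu_index * element_size.
     */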
    GArray *arr = entry.score->data;
    char *base_ptr = arr->data + entry.offset;
    size_t entry_size = g_array_get_element_size(arr);

    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);
    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);

    return ptr;
}

static TCGCond plugin_cond_to_tcgcond(enum qemu_plugin_cond cond)
{
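    /*
     * Plugin conditions compare u64 scoreboard values, so the relational
     * conditions map to the unsigned TCG variants.
     */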
    switch (cond) {
    case QEMU_PLUGIN_COND_EQ:
        return TCG_COND_EQ;
    case QEMU_PLUGIN_COND_NE:
        return TCG_COND_NE;
    case QEMU_PLUGIN_COND_LT:
        return TCG_COND_LTU;
    case QEMU_PLUGIN_COND_LE:
        return TCG_COND_LEU;
    case QEMU_PLUGIN_COND_GT:
        return TCG_COND_GTU;
    case QEMU_PLUGIN_COND_GE:
        return TCG_COND_GEU;
    default:
        /* ALWAYS and NEVER conditions should never reach here */
        g_assert_not_reached();
    }
}

static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGLabel *after_cb = gen_new_label();

    /*
     * The condition is inverted: the branch skips the callback when the
     * plugin's condition does not hold, i.e. the call is the "else" path.
     */
    TCGCond cond = tcg_invert_cond(plugin_cond_to_tcgcond(cb->cond));

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
    gen_set_label(after_cb);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

static void gen_inline_add_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();

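    /*
     * A plain (non-atomic) read-modify-write is enough here: the
     * scoreboard slot being updated belongs to this vcpu alone.
     */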
    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_addi_i64(val, val, cb->imm);
    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

static void gen_inline_store_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_constant_i64(cb->imm);

    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_ptr(ptr);
}

static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(&cb->regular);
        break;
    case PLUGIN_CB_COND:
        gen_udata_cond_cb(&cb->cond);
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
        gen_inline_add_u64_cb(&cb->inline_insn);
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        gen_inline_store_u64_cb(&cb->inline_insn);
        break;
    default:
        g_assert_not_reached();
    }
}

static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    switch (cb->type) {
    case PLUGIN_CB_MEM_REGULAR:
        if (rw & cb->regular.rw) {
            gen_mem_cb(&cb->regular, meminfo, addr);
        }
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
    case PLUGIN_CB_INLINE_STORE_U64:
        if (rw & cb->inline_insn.rw) {
            inject_cb(cb);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

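/*
 * Walk the TB's opcode stream and expand each marker opcode: emit the
 * code for the callbacks that plugins registered for that point, then
 * delete the marker itself.
 */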
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new ones.
     */
    tcg_temp_ebb_reset_freed(tcg_ctx);

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

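            /*
             * New opcodes are generated immediately before the marker;
             * the marker itself is removed once expansion is complete.
             */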
            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

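/*
 * Called at the start of TB translation; returns true (and emits the
 * first marker) only if some plugin has subscribed to the tb_trans event.
 */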
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
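    /*
     * qemu_plugin_insn structs are pooled in ptb->insns and reused
     * across translations; grow the array only when needed.
     */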
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, everything is reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* reset plugin translation state (plugin_tb is reused between blocks) */
    tcg_ctx->plugin_db = NULL;
    tcg_ctx->plugin_insn = NULL;
}