/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can do just that, rather than instrumenting
 * all memory accesses. To support this, we first have to translate a TB,
 * so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
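
/*
 * In outline, translation of a TB proceeds as follows:
 *
 *   plugin_gen_tb_start()            - reset per-TB state, emit TB marker
 *   plugin_gen_insn_start()          - emit per-insn marker
 *   plugin_gen_empty_mem_callback()  - emit mem skeletons (from the translator)
 *   plugin_gen_insn_end()            - emit after-insn marker
 *   plugin_gen_tb_end()              - collect requests via
 *                                      qemu_plugin_tb_trans_cb() and patch the
 *                                      op stream in plugin_gen_inject()
 *
 * As a sketch of the plugin side (see qemu-plugin.h for the authoritative
 * API), a registration made from the TB-translation callback such as:
 *
 *   qemu_plugin_register_vcpu_mem_cb(insn, mem_cb, QEMU_PLUGIN_CB_NO_REGS,
 *                                    QEMU_PLUGIN_MEM_RW, NULL);
 *
 * ends up, after injection, as a direct call to mem_cb from the generated
 * code for that instruction's memory accesses.
 */
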
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/helper-proto-common.h"

#define HELPER_H  "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_UDATA_R,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
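/*
 * The skeleton emitted below must match, op for op, the sequence that
 * append_inline_cb() copies and patches:
 *
 *   ld_i32 cpu_index; mul_i32 (element size patched in);
 *   ext_i32_ptr; mov ptr (score buffer address patched in); add_ptr;
 *   ld_i64; add_i64 (immediate patched in); st_i64
 */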
static void gen_empty_inline_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    /* second operand will be replaced by immediate value */
    tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
    tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
    tcg_gen_ld_i64(val, ptr, 0);
    /* second operand will be replaced by immediate value */
    tcg_gen_add_i64(val, val, val);

    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(cpu_index_as_ptr);
    tcg_temp_free_i32(cpu_index);
}

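/*
 * Emit a skeleton call to the mem helper.  The userdata pointer (NULL here)
 * and the callee are patched by append_mem_cb() once the real callbacks are
 * known; "info" is already final and is copied unchanged.
 */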
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_i32(meminfo, info);
    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
    case PLUGIN_GEN_FROM_TB:
    case PLUGIN_GEN_FROM_INSN:
        tcg_gen_plugin_cb(from);
        break;
    default:
        g_assert_not_reached();
    }
}

void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
    gen_empty_mem_cb(addr, info);
    tcg_gen_plugin_cb_end();

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
    gen_empty_inline_cb();
    tcg_gen_plugin_cb_end();
}

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

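/*
 * The copy_* helpers below walk two cursors in parallel: *begin_op advances
 * through the skeleton ops emitted at translation time, while the returned
 * op is the insertion point in the output stream.  Each call duplicates the
 * next skeleton op after op and returns the duplicate.
 */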
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
{
    return copy_op(begin_op, op, INDEX_op_ld_i32);
}

static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        op = copy_op(begin_op, op, INDEX_op_ext_i32_i64);
    }
    return op;
}

static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        op = copy_op(begin_op, op, INDEX_op_add_i32);
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_ld_i32(begin_op, op);
        op = copy_ld_i32(begin_op, op);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
{
    op = copy_op(begin_op, op, INDEX_op_mul_i32);
    op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
    return op;
}

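/*
 * In a TCG call op, args[] holds the output arguments, then the input
 * arguments, then the callee; the function pointer therefore lives at index
 * CALLO + CALLI.  The index is stored in *cb_idx, which also lets callers
 * tell that a first callback has already been injected (see append_mem_cb).
 */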
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}

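/*
 * Copy the inline skeleton emitted by gen_empty_inline_cb(), patching in
 * the scoreboard element size, the address of this entry's slot in the
 * score buffer, and the immediate to add.
 */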
static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    char *ptr = cb->inline_insn.entry.score->data->data;
    size_t elem_size = g_array_get_element_size(
        cb->inline_insn.entry.score->data);
    size_t offset = cb->inline_insn.entry.offset;

    op = copy_ld_i32(&begin_op, op);
    op = copy_mul_i32(&begin_op, op, elem_size);
    op = copy_ext_i32_ptr(&begin_op, op);
    op = copy_const_ptr(&begin_op, op, ptr + offset);
    op = copy_add_ptr(&begin_op, op);
    op = copy_ld_i64(&begin_op, op);
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
    op = copy_st_i64(&begin_op, op);
    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 (the "info" constant, so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, cb->regular.f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

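/*
 * args[2] of a plugin_cb_start op is the "wr" flag: 0 for a read and 1 for
 * a write (see the args[] description at the top of this file), so w + 1
 * yields QEMU_PLUGIN_MEM_R (1) or QEMU_PLUGIN_MEM_W (2), which we test
 * against the callback's rw mask.
 */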
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

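/*
 * Inject one copy of the skeleton per eligible callback and then remove the
 * skeleton itself; if there are no callbacks at all, the whole range up to
 * plugin_cb_end is simply dropped.
 */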
static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they
     * can be freed later on, and (2) point CPUState.plugin_mem_cbs to the
     * descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    cbs[0] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    n_cbs = cbs[0]->len + cbs[1]->len;

    if (n_cbs == 0) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);
    g_array_append_vals(arr, cbs[0]->data, cbs[0]->len);
    g_array_append_vals(arr, cbs[1]->data, cbs[1]->len);

    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

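/*
 * TB and insn callbacks are emitted directly, with no skeleton copying:
 * load the vCPU index from env and call the plugin function with
 * (cpu_index, userp).
 */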
static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

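/*
 * Direct-emission counterpart of gen_empty_inline_cb()/append_inline_cb():
 * index into the entry's per-vCPU slot in the scoreboard and add the
 * immediate in place.
 */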
static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
{
    GArray *arr = cb->inline_insn.entry.score->data;
    size_t offset = cb->inline_insn.entry.offset;
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);

    tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
    tcg_gen_ld_i64(val, ptr, offset);
    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
    tcg_gen_st_i64(val, ptr, offset);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}

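/*
 * Walk the TB's op list once the plugins' requests are known.  insn_start
 * ops track which guest instruction we are in; plugin_cb markers (TB, insn
 * and after-insn/after-TB events) are expanded by emitting ops directly
 * before them, whereas plugin_cb_start markers (mem events) are expanded by
 * copying and patching the skeleton ops that follow them.
 */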
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    pr_ops();

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs[PLUGIN_CB_REGULAR];
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
                    gen_udata_cb(cb);
                }

                cbs = plugin_tb->cbs[PLUGIN_CB_INLINE];
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
                    gen_inline_cb(cb);
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR];
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
                    gen_udata_cb(cb);
                }

                cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE];
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
                    gen_inline_cb(cb);
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

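/*
 * Called at the start of TB translation.  Returns true if at least one
 * plugin subscribed to the TB-translation event; per-TB state is reset
 * here rather than when translation ends, because a translation can be
 * aborted (see the comment above plugin_gen_tb_end).
 */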
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}