/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect
 * the instrumentation requests from plugins, we either generate code
 * for those markers or remove them if they have no requests.
 */
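
/*
 * For example, a TB containing a single guest instruction carries
 * markers roughly like this before injection (a sketch, not literal
 * TCG opcode output):
 *
 *     plugin_cb    PLUGIN_GEN_FROM_TB
 *     insn_start
 *     plugin_cb    PLUGIN_GEN_FROM_INSN
 *     <ops for the translated guest instruction, with a
 *      plugin_mem_cb marker for each guest memory access>
 *     plugin_cb    PLUGIN_GEN_AFTER_INSN
 *
 * plugin_gen_inject() walks this stream, emits the callbacks that
 * plugins registered in front of each marker, and deletes the marker.
 */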
45 #include "qemu/osdep.h"
46 #include "qemu/plugin.h"
47 #include "qemu/log.h"
48 #include "cpu.h"
49 #include "tcg/tcg.h"
50 #include "tcg/tcg-temp-internal.h"
51 #include "tcg/tcg-op.h"
52 #include "exec/exec-all.h"
53 #include "exec/plugin-gen.h"
54 #include "exec/translator.h"
55 
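/*
 * Each marker opcode records one of these values in args[0], telling
 * plugin_gen_inject() which set of callbacks to emit in its place.
 */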
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
    case PLUGIN_GEN_FROM_TB:
    case PLUGIN_GEN_FROM_INSN:
        tcg_gen_plugin_cb(from);
        break;
    default:
        g_assert_not_reached();
    }
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they
     * can be freed later on, and (2) point CPUState.plugin_mem_cbs to
     * the descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    memcpy(arr->data, insn->mem_cbs->data,
           len * sizeof(struct qemu_plugin_dyn_cb));
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

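/*
 * Emit a call to a plugin's execution-time callback: roughly
 * equivalent to cb->regular.f.vcpu_udata(cpu_index, cb->userp).
 */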
static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

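/*
 * Emit ops that update one per-vCPU counter of a plugin scoreboard.
 * The generated code is roughly equivalent to:
 *
 *     char *base = arr->data + cpu_index * g_array_get_element_size(arr);
 *     *(uint64_t *)(base + offset) += cb->inline_insn.imm;
 */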
static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
{
    GArray *arr = cb->inline_insn.entry.score->data;
    size_t offset = cb->inline_insn.entry.offset;
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);

    tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
    tcg_gen_ld_i64(val, ptr, offset);
    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
    tcg_gen_st_i64(val, ptr, offset);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

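/*
 * Emit a call to a plugin's memory access callback: roughly equivalent
 * to cb->regular.f.vcpu_mem(cpu_index, meminfo, addr, cb->userp).
 */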
static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call4(cb->regular.f.vcpu_mem, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

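/*
 * Walk the opcode stream generated for this TB, emitting the callbacks
 * that plugins requested in front of each marker opcode and then
 * deleting the marker.
 */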
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(plugin_tb->vaddr))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new ones.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

                    switch (cb->type) {
                    case PLUGIN_CB_REGULAR:
                        gen_udata_cb(cb);
                        break;
                    case PLUGIN_CB_INLINE:
                        gen_inline_cb(cb);
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    struct qemu_plugin_dyn_cb *cb =
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

                    switch (cb->type) {
                    case PLUGIN_CB_REGULAR:
                        gen_udata_cb(cb);
                        break;
                    case PLUGIN_CB_INLINE:
                        gen_inline_cb(cb);
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n, rw;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
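            /* match the access against the QEMU_PLUGIN_MEM_R/W bits in cb->rw */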
            rw = qemu_plugin_mem_is_store(meminfo) ? 2 : 1;

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                struct qemu_plugin_dyn_cb *cb =
                    &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

                if (cb->rw & rw) {
                    switch (cb->type) {
                    case PLUGIN_CB_MEM_REGULAR:
                        gen_mem_cb(cb, meminfo, addr);
                        break;
                    case PLUGIN_CB_INLINE:
                        gen_inline_cb(cb);
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

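/*
 * Called by the translator before a TB is translated: reset per-TB
 * plugin state and emit the FROM_TB marker. Returns true if at least
 * one plugin has subscribed to TB translation events.
 */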
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

        /* reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

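/*
 * Called by the translator before each instruction is translated:
 * record the instruction, emit the FROM_INSN marker, and compute the
 * instruction's host address (handling page crossings).
 */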
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, we make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}
427