1 /*
2 * QEMU TCG vCPU common functionality
3 *
4 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
5 *
6 * Copyright (c) 2003-2008 Fabrice Bellard
7 * Copyright (c) 2014 Red Hat Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this software and associated documentation files (the "Software"), to deal
11 * in the Software without restriction, including without limitation the rights
12 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 * copies of the Software, and to permit persons to whom the Software is
14 * furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 * THE SOFTWARE.
26 */
27
28 #include "qemu/osdep.h"
29 #include "system/accel-ops.h"
30 #include "system/tcg.h"
31 #include "system/replay.h"
32 #include "exec/icount.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/guest-random.h"
35 #include "qemu/timer.h"
36 #include "exec/cputlb.h"
37 #include "exec/hwaddr.h"
38 #include "exec/tb-flush.h"
39 #include "exec/translation-block.h"
40 #include "exec/watchpoint.h"
41 #include "gdbstub/enums.h"
42
43 #include "hw/core/cpu.h"
44
45 #include "tcg-accel-ops.h"
46 #include "tcg-accel-ops-mttcg.h"
47 #include "tcg-accel-ops-rr.h"
48 #include "tcg-accel-ops-icount.h"
49
50 /* common functionality among all TCG variants */
51
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    /*
     * Fold the cluster number into the hash used to look up TBs.
     * This matters because a TB that is valid for one cluster at a
     * given physical address and set of CPU flags is not necessarily
     * valid for another: the two clusters may have different views of
     * physical memory, or different CPU features (e.g. FPU present or
     * absent).
     */
    uint32_t flags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    if (parallel) {
        flags |= CF_PARALLEL;
    }
    if (icount_enabled()) {
        flags |= CF_USE_ICOUNT;
    }
    tcg_cflags_set(cpu, flags);
}
70
/* Tear-down hook for a TCG vCPU: report that this CPU's thread has gone. */
void tcg_cpu_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}
75
/*
 * Run guest code on @cpu and return the exit reason from cpu_exec().
 * The cpu_exec_start()/cpu_exec_end() pair brackets the run so the
 * rest of QEMU can coordinate with an executing vCPU.
 */
int tcg_cpu_exec(CPUState *cpu)
{
    int r;

    assert(tcg_enabled());
    cpu_exec_start(cpu);
    r = cpu_exec(cpu);
    cpu_exec_end(cpu);
    return r;
}
85
/*
 * Reset-hold hook: drop per-vCPU translation state so nothing stale
 * survives a CPU reset.
 */
static void tcg_cpu_reset_hold(CPUState *cpu)
{
    /* Invalidate this vCPU's cached TB jump-cache entries... */
    tcg_flush_jmp_cache(cpu);

    /* ...and flush its TLB. */
    tlb_flush(cpu);
}
92
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    /* Caller must hold the Big QEMU Lock. */
    g_assert(bql_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.  If we are the target cpu ourselves, set
     * icount_decr.u16.high to -1 so the pending interrupt is
     * noticed at the next check of the exit-request flag.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
    }
}
110
/* TCG always supports guest debugging (breakpoints and watchpoints). */
static bool tcg_supports_guest_debug(void)
{
    return true;
}
115
116 /* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
xlat_gdb_type(CPUState * cpu,int gdbtype)117 static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
118 {
119 static const int xlat[] = {
120 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
121 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
122 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
123 };
124
125 int cputype = xlat[gdbtype];
126
127 if (cpu->cc->gdb_stop_before_watchpoint) {
128 cputype |= BP_STOP_BEFORE_ACCESS;
129 }
130 return cputype;
131 }
132
/*
 * gdbstub hook: insert a breakpoint or watchpoint of @type at @addr
 * (@len bytes for watchpoints) on every vCPU.  Returns 0 on success,
 * the first per-CPU error otherwise, or -ENOSYS for unknown types.
 */
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *it;
    int ret;

    if (type == GDB_BREAKPOINT_SW || type == GDB_BREAKPOINT_HW) {
        CPU_FOREACH(it) {
            ret = cpu_breakpoint_insert(it, addr, BP_GDB, NULL);
            if (ret) {
                return ret;
            }
        }
        return 0;
    }

    if (type == GDB_WATCHPOINT_WRITE || type == GDB_WATCHPOINT_READ ||
        type == GDB_WATCHPOINT_ACCESS) {
        CPU_FOREACH(it) {
            ret = cpu_watchpoint_insert(it, addr, len,
                                        xlat_gdb_type(it, type), NULL);
            if (ret) {
                return ret;
            }
        }
        return 0;
    }

    return -ENOSYS;
}
163
/*
 * gdbstub hook: remove a breakpoint or watchpoint of @type at @addr
 * (@len bytes for watchpoints) from every vCPU.  Returns 0 on success,
 * the first per-CPU error otherwise, or -ENOSYS for unknown types.
 */
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *it;
    int ret;

    if (type == GDB_BREAKPOINT_SW || type == GDB_BREAKPOINT_HW) {
        CPU_FOREACH(it) {
            ret = cpu_breakpoint_remove(it, addr, BP_GDB);
            if (ret) {
                return ret;
            }
        }
        return 0;
    }

    if (type == GDB_WATCHPOINT_WRITE || type == GDB_WATCHPOINT_READ ||
        type == GDB_WATCHPOINT_ACCESS) {
        CPU_FOREACH(it) {
            ret = cpu_watchpoint_remove(it, addr, len,
                                        xlat_gdb_type(it, type));
            if (ret) {
                return ret;
            }
        }
        return 0;
    }

    return -ENOSYS;
}
194
/* gdbstub hook: drop every GDB-owned breakpoint and watchpoint on @cpu. */
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}
200
/*
 * Populate the AccelOps table for the selected TCG variant
 * (mttcg, round-robin, or round-robin with icount).
 */
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    /* Hooks shared by every TCG variant. */
    ops->cpu_reset_hold = tcg_cpu_reset_hold;
    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;

    if (qemu_tcg_mttcg_enabled()) {
        /* Multi-threaded: one host thread per vCPU. */
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
        return;
    }

    /* Round-robin: all vCPUs share one host thread. */
    ops->create_vcpu_thread = rr_start_vcpu_thread;
    ops->kick_vcpu_thread = rr_kick_vcpu_thread;

    if (icount_enabled()) {
        ops->handle_interrupt = icount_handle_interrupt;
        ops->get_virtual_clock = icount_get;
        ops->get_elapsed_ticks = icount_get;
    } else {
        ops->handle_interrupt = tcg_handle_interrupt;
    }
}
226
/* QOM class_init: hook up the deferred ops-table initializer. */
static void tcg_accel_ops_class_init(ObjectClass *oc, const void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}
233
/* QOM type describing the TCG accelerator ops. */
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));
242
/* Register the TCG accelerator-ops type with QOM at startup. */
static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);
248