/*
 * QEMU System Emulator, accelerator interfaces
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "exec/replay-core.h"
#include "system/cpu-timers.h"
#include "tcg/startup.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if defined(CONFIG_USER_ONLY)
#include "hw/qdev-core.h"
#else
#include "hw/boards.h"
#endif
#include "internal-common.h"
#include "cpu-param.h"


struct TCGState {
    AccelState parent_obj;

    bool mttcg_enabled;    /* run one vCPU thread per guest CPU (MTTCG)? */
    bool one_insn_per_tb;  /* limit each translation block to one guest insn */
    int splitwx_enabled;   /* split RW/RX code mapping: -1 auto, 0 off, 1 on */
    unsigned long tb_size; /* translation block cache size in MiB, 0 = default */
};
typedef struct TCGState TCGState;

#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
                         TYPE_TCG_ACCEL)

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there is one remaining limitation to check:
 *   - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 */

static bool default_mttcg_enabled(void)
{
    if (icount_enabled()) {
        return false;
    }
#ifdef TARGET_SUPPORTS_MTTCG
# ifndef TCG_GUEST_DEFAULT_MO
#  error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
    return true;
#else
    return false;
#endif
}

static void tcg_accel_instance_init(Object *obj)
{
    TCGState *s = TCG_STATE(obj);

    s->mttcg_enabled = default_mttcg_enabled();

    /* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
    s->splitwx_enabled = -1;
#else
    s->splitwx_enabled = 0;
#endif
}
bool mttcg_enabled;
bool one_insn_per_tb;

static int tcg_init_machine(MachineState *ms)
{
    TCGState *s = TCG_STATE(current_accel());
#ifdef CONFIG_USER_ONLY
    unsigned max_cpus = 1;
#else
    unsigned max_cpus = ms->smp.max_cpus;
#endif

    tcg_allowed = true;
    mttcg_enabled = s->mttcg_enabled;

    page_init();
    tb_htable_init();
    tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);

#if defined(CONFIG_SOFTMMU)
    /*
     * There's no guest base to take into account, so go ahead and
     * initialize the prologue now.
     */
    tcg_prologue_init();
#endif

#ifdef CONFIG_USER_ONLY
    qdev_create_fake_machine();
#endif

    return 0;
}

static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    if (strcmp(value, "multi") == 0) {
        if (icount_enabled()) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
#ifndef TARGET_SUPPORTS_MTTCG
            warn_report("Guest not yet converted to MTTCG - "
                        "you may get unexpected results");
#endif
            s->mttcg_enabled = true;
        }
    } else if (strcmp(value, "single") == 0) {
        s->mttcg_enabled = false;
    } else {
        error_setg(errp, "Invalid 'thread' setting %s", value);
    }
}

static void tcg_get_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value = s->tb_size;

    visit_type_uint32(v, name, &value, errp);
}

static void tcg_set_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->tb_size = value;
}

static bool tcg_get_splitwx(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->splitwx_enabled;
}

static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->splitwx_enabled = value;
}

static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->one_insn_per_tb;
}

static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->one_insn_per_tb = value;
    /* Set the global also: this changes the behaviour */
    qatomic_set(&one_insn_per_tb, value);
}

static int tcg_gdbstub_supported_sstep_flags(void)
{
    /*
     * In replay mode all events will come from the log and can't be
     * suppressed, otherwise we would break determinism. However, as those
     * events are tied to the number of executed instructions, we won't see
     * them occurring every time we single step.
     */
    if (replay_mode != REPLAY_MODE_NONE) {
        return SSTEP_ENABLE;
    } else {
        return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER;
    }
}

static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "tcg";
    ac->init_machine = tcg_init_machine;
    ac->cpu_common_realize = tcg_exec_realizefn;
    ac->cpu_common_unrealize = tcg_exec_unrealizefn;
    ac->allowed = &tcg_allowed;
    ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    object_class_property_add(oc, "tb-size", "int",
                              tcg_get_tb_size, tcg_set_tb_size,
                              NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");

    object_class_property_add_bool(oc, "split-wx",
                                   tcg_get_splitwx, tcg_set_splitwx);
    object_class_property_set_description(oc, "split-wx",
        "Map jit pages into separate RW and RX regions");

    object_class_property_add_bool(oc, "one-insn-per-tb",
                                   tcg_get_one_insn_per_tb,
                                   tcg_set_one_insn_per_tb);
    object_class_property_set_description(oc, "one-insn-per-tb",
        "Only put one guest insn in each translation block");
}
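
/*
 * Usage note (illustrative, not part of the accelerator logic): the
 * properties registered in tcg_accel_class_init() above are normally set
 * through the -accel command line option, for example:
 *
 *   qemu-system-x86_64 -accel tcg,thread=multi,tb-size=512,one-insn-per-tb=on
 *
 * The binary name and the values shown are examples only; "thread",
 * "tb-size", "split-wx" and "one-insn-per-tb" are the property names
 * defined in this file.
 */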

static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};
module_obj(TYPE_TCG_ACCEL);

static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

type_init(register_accel_types);