/*
 * x86 misc helpers - system code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "system/address-spaces.h"
#include "system/memory.h"
#include "exec/cputlb.h"
#include "tcg/helper-tcg.h"
#include "hw/i386/apic.h"

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_read_cr8(CPUX86State *env)
{
    if (!(env->hflags2 & HF2_VINTR_MASK)) {
        return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
    } else {
        return env->int_ctl & V_TPR_MASK;
    }
}
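
/*
 * MOV to CR0/CR3/CR4/CR8.  CR0 may still trigger a selective SVM vmexit,
 * CR3/CR4 validate reserved bits, and CR8 updates the task priority: the
 * 4-bit TPR lives in the local APIC, or in the V_TPR field of int_ctl
 * while SVM virtual interrupt masking (HF2_VINTR_MASK) is in effect.
 */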
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the
         * selective intercept for bits other than TS and MP.
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            bql_lock();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            bql_unlock();
        }
        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);

        CPUState *cs = env_cpu(env);
        if (ctl_has_irq(env)) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
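
/*
 * WRMSR: write EDX:EAX to the MSR indexed by ECX.  Illegal values for
 * known MSRs raise #GP(0) via the error label; writes to unknown MSRs
 * are silently ignored.  Guest-side usage, for illustration only:
 *
 *     mov  $0xC0000080, %ecx    # MSR_EFER
 *     rdmsr                     # EDX:EAX = current EFER
 *     or   $1, %eax             # set EFER.SCE
 *     wrmsr
 */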
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE: {
        int ret;

        if (val & MSR_IA32_APICBASE_RESERVED) {
            goto error;
        }

        ret = cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        if (ret < 0) {
            goto error;
        }
        break;
    }
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        if (val & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
            goto error;
        }
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set. */
        /* FIXME: Extend highest implemented bit of linear address. */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        bql_lock();
        ret = apic_msr_write(index, val);
        bql_unlock();
        if (ret < 0) {
            goto error;
        }

        break;
    }
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
 error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
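
/*
 * RDMSR: return the MSR indexed by ECX in EDX:EAX.  Unknown MSRs read
 * as zero rather than raising #GP.
 */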
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    case MSR_CORE_THREAD_COUNT: {
        val = cpu_x86_get_msr_core_thread_count(x86_cpu);
        break;
    }
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        bql_lock();
        ret = apic_msr_read(index, &val);
        bql_unlock();
        if (ret < 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        break;
    }
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
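
/* Flush one page translation from the TLB, e.g. when emulating INVLPG. */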
void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

G_NORETURN void helper_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    do_end_instruction(env);
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

G_NORETURN void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        /* More than one CPU: do not sleep, another CPU may wake this one.  */
        helper_pause(env);
    } else {
        helper_hlt(env);
    }
}