/*
 * Miscellaneous PowerPC emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "mmu-book3s-v3.h"
#include "hw/ppc/ppc.h"

#include "helper_regs.h"

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
                                   target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t nr_threads = cs->nr_threads;

    if (nr_threads == 1) {
        env->spr[sprn] = val;
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cenv->spr[sprn] = val;
    }
}

void helper_spr_write_CTRL(CPUPPCState *env, uint32_t sprn,
                           target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;
    uint32_t run = val & 1;
    uint32_t ts, ts_mask;

    assert(sprn == SPR_CTRL);

    env->spr[sprn] &= ~1U;
    env->spr[sprn] |= run;

    ts_mask = ~(1U << (8 + env->spr[SPR_TIR]));
    ts = run << (8 + env->spr[SPR_TIR]);

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;

        cenv->spr[sprn] &= ts_mask;
        cenv->spr[sprn] |= ts;
    }
}

#ifdef TARGET_PPC64
static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
                                  const char *caller, uint32_t cause,
                                  uintptr_t raddr)
{
    qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
                  bit, caller);

    env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);

    raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
}

static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause,
                               uintptr_t raddr)
{
    qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);

    env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
    cause &= FSCR_IC_MASK;
    env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;

    raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
}
#endif

void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
                                 const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
    if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
        !(env->spr[SPR_HFSCR] & (1UL << bit))) {
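        /*
         * GETPC() is captured here, in the helper called directly from
         * TCG-generated code, and handed down as 'raddr' so that
         * raise_exception_err_ra() unwinds to the guest instruction
         * that failed the facility check.
         */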
        raise_hv_fu_exception(env, bit, caller, cause, GETPC());
    }
#endif
}

void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
                                uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->spr[SPR_FSCR] & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
                               uint32_t sprn, uint32_t cause)
{
#ifdef TARGET_PPC64
    if (env->msr & (1ULL << bit)) {
        /* Facility is enabled, continue */
        return;
    }
    raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}

#if !defined(CONFIG_USER_ONLY)

#ifdef TARGET_PPC64
static void helper_mmcr0_facility_check(CPUPPCState *env, uint32_t bit,
                                        uint32_t sprn, uint32_t cause)
{
    if (FIELD_EX64(env->msr, MSR, PR) &&
        !(env->spr[SPR_POWER_MMCR0] & (1ULL << bit))) {
        raise_fu_exception(env, bit, sprn, cause, GETPC());
    }
}
#endif

void helper_store_sdr1(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_SDR1] != val) {
        ppc_store_sdr1(env, val);
        tlb_flush(env_cpu(env));
    }
}

#if defined(TARGET_PPC64)
void helper_store_ptcr(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_PTCR] != val) {
        PowerPCCPU *cpu = env_archcpu(env);
        target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
        target_ulong patbsize = val & PTCR_PATS;

        qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, val);

        assert(!cpu->vhyp);
        assert(env->mmu_model & POWERPC_MMU_3_00);

        if (val & ~ptcr_mask) {
            error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                         val & ~ptcr_mask);
            val &= ptcr_mask;
        }

        if (patbsize > 24) {
            error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                         " stored in PTCR", patbsize);
            return;
        }

        env->spr[SPR_PTCR] = val;
        tlb_flush(env_cpu(env));
    }
}

void helper_store_pcr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    env->spr[SPR_PCR] = value & pcc->pcr_mask;
}

void helper_store_ciabr(CPUPPCState *env, target_ulong value)
{
    ppc_store_ciabr(env, value);
}

void helper_store_dawr0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawr0(env, value);
}

void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
{
    ppc_store_dawrx0(env, value);
}

/*
 * DPDES register is shared. Each bit reflects the state of the
 * doorbell interrupt of a thread of the same core.
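 *
 * In LPAR-per-thread mode (POWERPC_FLAG_SMT_1LPAR clear) only the
 * calling thread's doorbell is visible, as bit 0.  Otherwise the
 * helpers below walk every sibling vCPU under the BQL to gather or
 * deliver the per-thread doorbell bits.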
228 */ 229 target_ulong helper_load_dpdes(CPUPPCState *env) 230 { 231 CPUState *cs = env_cpu(env); 232 CPUState *ccs; 233 uint32_t nr_threads = cs->nr_threads; 234 target_ulong dpdes = 0; 235 236 helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP); 237 238 if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) { 239 nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */ 240 } 241 242 if (nr_threads == 1) { 243 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 244 dpdes = 1; 245 } 246 return dpdes; 247 } 248 249 bql_lock(); 250 THREAD_SIBLING_FOREACH(cs, ccs) { 251 PowerPCCPU *ccpu = POWERPC_CPU(ccs); 252 CPUPPCState *cenv = &ccpu->env; 253 uint32_t thread_id = ppc_cpu_tir(ccpu); 254 255 if (cenv->pending_interrupts & PPC_INTERRUPT_DOORBELL) { 256 dpdes |= (0x1 << thread_id); 257 } 258 } 259 bql_unlock(); 260 261 return dpdes; 262 } 263 264 void helper_store_dpdes(CPUPPCState *env, target_ulong val) 265 { 266 PowerPCCPU *cpu = env_archcpu(env); 267 CPUState *cs = env_cpu(env); 268 CPUState *ccs; 269 uint32_t nr_threads = cs->nr_threads; 270 271 helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP); 272 273 if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) { 274 nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */ 275 } 276 277 if (val & ~(nr_threads - 1)) { 278 qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value " 279 TARGET_FMT_lx"\n", val); 280 val &= (nr_threads - 1); /* Ignore the invalid bits */ 281 } 282 283 if (nr_threads == 1) { 284 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1); 285 return; 286 } 287 288 /* Does iothread need to be locked for walking CPU list? */ 289 bql_lock(); 290 THREAD_SIBLING_FOREACH(cs, ccs) { 291 PowerPCCPU *ccpu = POWERPC_CPU(ccs); 292 uint32_t thread_id = ppc_cpu_tir(ccpu); 293 294 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); 295 } 296 bql_unlock(); 297 } 298 #endif /* defined(TARGET_PPC64) */ 299 300 void helper_store_pidr(CPUPPCState *env, target_ulong val) 301 { 302 env->spr[SPR_BOOKS_PID] = (uint32_t)val; 303 tlb_flush(env_cpu(env)); 304 } 305 306 void helper_store_lpidr(CPUPPCState *env, target_ulong val) 307 { 308 env->spr[SPR_LPIDR] = (uint32_t)val; 309 310 /* 311 * We need to flush the TLB on LPID changes as we only tag HV vs 312 * guest in TCG TLB. Also the quadrants means the HV will 313 * potentially access and cache entries for the current LPID as 314 * well. 315 */ 316 tlb_flush(env_cpu(env)); 317 } 318 319 void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) 320 { 321 /* Bits 26 & 27 affect single-stepping. */ 322 hreg_compute_hflags(env); 323 /* Bits 28 & 29 affect reset or shutdown. */ 324 store_40x_dbcr0(env, val); 325 } 326 327 void helper_store_40x_sler(CPUPPCState *env, target_ulong val) 328 { 329 store_40x_sler(env, val); 330 } 331 #endif 332 333 /*****************************************************************************/ 334 /* Special registers manipulation */ 335 336 /* 337 * This code is lifted from MacOnLinux. It is called whenever THRM1,2 338 * or 3 is read an fixes up the values in such a way that will make 339 * MacOS not hang. These registers exist on some 75x and 74xx 340 * processors. 
341 */ 342 void helper_fixup_thrm(CPUPPCState *env) 343 { 344 target_ulong v, t; 345 int i; 346 347 #define THRM1_TIN (1 << 31) 348 #define THRM1_TIV (1 << 30) 349 #define THRM1_THRES(x) (((x) & 0x7f) << 23) 350 #define THRM1_TID (1 << 2) 351 #define THRM1_TIE (1 << 1) 352 #define THRM1_V (1 << 0) 353 #define THRM3_E (1 << 0) 354 355 if (!(env->spr[SPR_THRM3] & THRM3_E)) { 356 return; 357 } 358 359 /* Note: Thermal interrupts are unimplemented */ 360 for (i = SPR_THRM1; i <= SPR_THRM2; i++) { 361 v = env->spr[i]; 362 if (!(v & THRM1_V)) { 363 continue; 364 } 365 v |= THRM1_TIV; 366 v &= ~THRM1_TIN; 367 t = v & THRM1_THRES(127); 368 if ((v & THRM1_TID) && t < THRM1_THRES(24)) { 369 v |= THRM1_TIN; 370 } 371 if (!(v & THRM1_TID) && t > THRM1_THRES(24)) { 372 v |= THRM1_TIN; 373 } 374 env->spr[i] = v; 375 } 376 } 377 378 #if !defined(CONFIG_USER_ONLY) 379 #if defined(TARGET_PPC64) 380 void helper_clrbhrb(CPUPPCState *env) 381 { 382 helper_hfscr_facility_check(env, HFSCR_BHRB, "clrbhrb", FSCR_IC_BHRB); 383 384 helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB); 385 386 if (env->flags & POWERPC_FLAG_BHRB) { 387 memset(env->bhrb, 0, sizeof(env->bhrb)); 388 } 389 } 390 391 uint64_t helper_mfbhrbe(CPUPPCState *env, uint32_t bhrbe) 392 { 393 unsigned int index; 394 395 helper_hfscr_facility_check(env, HFSCR_BHRB, "mfbhrbe", FSCR_IC_BHRB); 396 397 helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB); 398 399 if (!(env->flags & POWERPC_FLAG_BHRB) || 400 (bhrbe >= env->bhrb_num_entries) || 401 (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) { 402 return 0; 403 } 404 405 /* 406 * Note: bhrb_offset is the byte offset for writing the 407 * next entry (over the oldest entry), which is why we 408 * must offset bhrbe by 1 to get to the 0th entry. 409 */ 410 index = ((env->bhrb_offset / sizeof(uint64_t)) - (bhrbe + 1)) % 411 env->bhrb_num_entries; 412 return env->bhrb[index]; 413 } 414 #endif 415 #endif 416