/*
 * PowerPC exception emulation helpers for QEMU (TCG specific)
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "target/ppc/cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "system/runstate.h"

#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "internal.h"
#include "cpu.h"
#include "trace.h"

/*****************************************************************************/
/* Exceptions processing helpers */

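/*
 * Raise an exception and exit the current TB. 'raddr' is the host return
 * address of the call site in generated code (as produced by GETPC()); it
 * lets TCG restore the guest CPU state before the exception is delivered.
 * Pass 0 when the guest state is already up to date.
 */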
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#ifndef CONFIG_USER_ONLY

static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
                                           uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

#endif /* !CONFIG_USER_ONLY */

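/*
 * tw/td trap when any condition selected by the TO field holds. The five
 * TO bits map to: 0x10 signed <, 0x08 signed >, 0x04 =, 0x02 unsigned <,
 * 0x01 unsigned >. For example, the extended mnemonic "tweq r3,r4"
 * (TO = 0b00100) traps iff GPR3 == GPR4.
 */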
void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (unlikely(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01)))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#ifdef TARGET_PPC64
void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (unlikely(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif /* TARGET_PPC64 */

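/*
 * Digest for the hashst/hashchk family (ISA v3.1B, added for
 * return-address protection). As the helper name suggests, it is a
 * SIMON-like reduced 16-bit Feistel cipher keyed from HASHKEYR or
 * HASHPKEYR; 'lane' rotates the per-round key schedule so each 32-bit
 * lane of the input is mixed with a different key ordering.
 */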
static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
{
    const uint16_t c = 0xfffc;
    const uint64_t z0 = 0xfa2561cdf44ac398ULL;
    uint16_t z = 0, temp;
    uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];

    for (int i = 3; i >= 0; i--) {
        k[i] = key & 0xffff;
        key >>= 16;
    }
    xleft[0] = x & 0xffff;
    xright[0] = (x >> 16) & 0xffff;

    for (int i = 0; i < 28; i++) {
        z = (z0 >> (63 - i)) & 1;
        temp = ror16(k[i + 3], 3) ^ k[i + 1];
        k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
    }

    for (int i = 0; i < 8; i++) {
        eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
        eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
        eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
        eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
    }

    for (int i = 0; i < 32; i++) {
        fxleft[i] = (rol16(xleft[i], 1) &
                     rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
        xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
        xright[i + 1] = xleft[i];
    }

    return (((uint32_t)xright[32]) << 16) | xleft[32];
}

static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
    uint64_t stage1_h, stage1_l;

    for (int i = 0; i < 4; i++) {
        stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
        stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
        stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
        stage0_l |= (ra & 0xff) << (8 * 2 * i);
        rb >>= 8;
        ra >>= 8;
    }

    stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
    stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
    stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
    stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);

    return stage1_h ^ stage1_l;
}

static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
                    target_ulong rb, uint64_t key, bool store)
{
    uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;

    if (store) {
        cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
    } else {
        loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
        if (loaded_hash != calculated_hash) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_TRAP, GETPC());
        }
    }
}

#include "qemu/guest-random.h"

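/*
 * The hash helpers are no-ops unless the relevant DEXCR aspect is enabled
 * for the current privilege level: problem state checks DEXCR[PRO] (or
 * the hypervisor override HDEXCR[ENF]), privileged non-hypervisor state
 * checks DEXCR[PNH] (or HDEXCR[ENF]), and hypervisor state checks
 * HDEXCR[HNU].
 */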
#ifdef TARGET_PPC64
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    if (env->msr & R_MSR_PR_MASK) {                                           \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_HV_MASK)) {                                 \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_S_MASK)) {                                  \
        if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK))     \
            return;                                                           \
    }                                                                         \
                                                                              \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#else
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#endif /* TARGET_PPC64 */

HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
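
/*
 * hashst/hashchk are keyed with HASHKEYR and gated by the NPHIE DEXCR
 * aspect; hashstp/hashchkp are the privileged variants keyed with
 * HASHPKEYR and gated by PHIE. A hashchk that does not reproduce the
 * doubleword stored by the matching hashst raises a TRAP program
 * interrupt.
 */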

#ifndef CONFIG_USER_ONLY

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr);
    insn = ppc_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}

void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr vaddr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    CPUPPCState *env = cpu_env(cs);

    switch (env->excp_model) {
#if defined(TARGET_PPC64)
    case POWERPC_EXCP_POWER8:
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
    case POWERPC_EXCP_POWER11:
        /*
         * Machine check codes can be found in the processor User Manual
         * or in the Linux or skiboot sources.
         */
        if (access_type == MMU_DATA_LOAD) {
            env->spr[SPR_DAR] = vaddr;
            env->spr[SPR_DSISR] = PPC_BIT(57);
            env->error_code = PPC_BIT(42);

        } else if (access_type == MMU_DATA_STORE) {
            /*
             * MCE for stores in POWER is asynchronous so hardware does
             * not set DAR, but QEMU can do better.
             */
            env->spr[SPR_DAR] = vaddr;
            env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
            env->error_code |= PPC_BIT(42);

        } else { /* Fetch */
            /*
             * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
             * the instruction, so that must always be clear for fetches.
             */
            env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
        }
        break;
#endif
    default:
        /*
         * TODO: Check behaviour for other CPUs, for now do nothing.
         * Could add a basic MCE even if real hardware ignores.
         */
        return;
    }

    cs->exception_index = POWERPC_EXCP_MCHECK;
    cpu_loop_exit_restore(cs, retaddr);
}

void ppc_cpu_debug_excp_handler(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        if (cs->watchpoint_hit) {
            if (cs->watchpoint_hit->flags & BP_CPU) {
                env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
                env->spr[SPR_DSISR] = PPC_BIT(41);
                cs->watchpoint_hit = NULL;
                raise_exception(env, POWERPC_EXCP_DSI);
            }
            cs->watchpoint_hit = NULL;
        } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
            raise_exception_err(env, POWERPC_EXCP_TRACE,
                                PPC_BIT(33) | PPC_BIT(43));
        }
    }
#endif
}

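/*
 * CIABR (Completed Instruction Address Breakpoint Register) holds the
 * breakpoint address in bits 0:61 and a privilege filter in bits 62:63:
 * 0b01 problem state, 0b10 supervisor, 0b11 hypervisor. A zero PRIV
 * field never installs the breakpoint, hence the assertion below.
 */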
bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);

    if (env->insns_flags2 & PPC2_ISA207S) {
        target_ulong priv;

        priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
        switch (priv) {
        case 0x1: /* problem */
            return env->msr & ((target_ulong)1 << MSR_PR);
        case 0x2: /* supervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                    !(env->msr & ((target_ulong)1 << MSR_HV)));
        case 0x3: /* hypervisor */
            return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
                    (env->msr & ((target_ulong)1 << MSR_HV)));
        default:
            g_assert_not_reached();
        }
    }
#endif

    return false;
}

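/*
 * DAWRX control fields, in ISA bit numbering: WT (59) selects
 * translated-access matching, WTI (60) makes WT a don't-care, and
 * HV (61), SV (62) and PR (63) select which privilege states the
 * watchpoint applies to.
 */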
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = cpu_env(cs);
    bool wt, wti, hv, sv, pr;
    uint32_t dawrx;

    if ((env->insns_flags2 & PPC2_ISA207S) &&
        (wp == env->dawr_watchpoint[0])) {
        dawrx = env->spr[SPR_DAWRX0];
    } else if ((env->insns_flags2 & PPC2_ISA310) &&
               (wp == env->dawr_watchpoint[1])) {
        dawrx = env->spr[SPR_DAWRX1];
    } else {
        return false;
    }

    wt = extract32(dawrx, PPC_BIT_NR(59), 1);
    wti = extract32(dawrx, PPC_BIT_NR(60), 1);
    hv = extract32(dawrx, PPC_BIT_NR(61), 1);
    sv = extract32(dawrx, PPC_BIT_NR(62), 1);
    pr = extract32(dawrx, PPC_BIT_NR(63), 1);

    if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
        return false;
    } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
        return false;
    } else if (!sv) {
        return false;
    }

    if (!wti) {
        if (env->msr & ((target_ulong)1 << MSR_DR)) {
            return wt;
        } else {
            return !wt;
        }
    }

    return true;
#endif

    return false;
}

/*
 * This stops the machine and logs CPU state without killing QEMU (like
 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
 * so the machine can still be debugged.
 */
G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
{
    CPUState *cs = env_cpu(env);
    FILE *f;

    f = qemu_log_trylock();
    if (f) {
        fprintf(f, "Entering checkstop state: %s\n", reason);
        cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_unlock(f);
    }

    qemu_system_guest_panicked(NULL);
    cpu_loop_exit_noexc(cs);
}

/* Return true iff byteswap is needed to load instruction */
static inline bool insn_need_byteswap(CPUArchState *env)
{
    /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
    return !!(env->msr & ((target_ulong)1 << MSR_LE));
}

uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
{
    uint32_t insn = cpu_ldl_code(env, addr);

    if (insn_need_byteswap(env)) {
        insn = bswap32(insn);
    }

    return insn;
}

#if defined(TARGET_PPC64)
void helper_attn(CPUPPCState *env)
{
    /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
    if ((*env->check_attn)(env)) {
        powerpc_checkstop(env, "host executed attn");
    } else {
        raise_exception_err(env, POWERPC_EXCP_HV_EMU,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }
}

void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);

    /* HDECR is not supposed to wake from PM state; it may have already fired */
    if (env->resume_as_sreset) {
        PowerPCCPU *cpu = env_archcpu(env);
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
    }

    ppc_maybe_interrupt(env);
}

#endif /* TARGET_PPC64 */

void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        cpu_interrupt_exittb(env_cpu(env));
        raise_exception(env, excp);
    }
}

void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
    ppc_maybe_interrupt(env);
}

static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

    /* MSR:TGPR cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR) {
        msr &= ~(1ULL << MSR_TGPR);
    }

#ifdef TARGET_PPC64
    /* Switching to 32-bit? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(env_cpu(env));
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

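/*
 * The rfi family all funnel through do_rfi(); they differ only in which
 * register pair supplies the new NIP and MSR: rfi/rfid use SRR0/SRR1,
 * hrfid uses HSRR0/HSRR1, rfscv uses LR/CTR, the 40x rfci uses SRR2/SRR3,
 * and the BookE rfci/rfdi/rfmci use the CSRR, DSRR and MCSRR pairs.
 */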
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}

void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     * the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}

/*
 * Triggers or queues an 'ebb_excp' EBB exception. All checks
 * but FSCR, HFSCR and msr_pr must be done beforehand.
 *
 * PowerISA v3.1 isn't clear about whether an EBB should be
 * postponed or cancelled if the EBB facility is unavailable.
 * Our assumption here is that the EBB is cancelled if both
 * FSCR and HFSCR EBB facilities aren't available.
 */
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /*
     * FSCR_EBB and FSCR_IC_EBB are the same bits used with
     * HFSCR.
     */
    helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
    helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);

    if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
        env->spr[SPR_BESCR] |= BESCR_PMEO;
    } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
        env->spr[SPR_BESCR] |= BESCR_EEO;
    }

    if (FIELD_EX64(env->msr, MSR, PR)) {
        powerpc_excp(cpu, ebb_excp);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
    }
}

void raise_ebb_perfm_exception(CPUPPCState *env)
{
    bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
                             env->spr[SPR_BESCR] & BESCR_PME &&
                             env->spr[SPR_BESCR] & BESCR_GE;

    if (!perfm_ebb_enabled) {
        return;
    }

    do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif /* TARGET_PPC64 */

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}

/* Embedded.Processor Control */
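/*
 * For msgsnd/msgclr, RB carries the message: the type field (decoded via
 * DBELL_TYPE_MASK) selects the doorbell class, and the low bits hold a
 * PIR tag that must match SPR_BOOKE_PIR unless the broadcast bit is set.
 */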
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    ppc_set_irq(env_archcpu(env), irq, 0);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    bql_lock();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            ppc_set_irq(cpu, irq, 1);
        }
    }
    bql_unlock();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

static inline bool dbell_bcast_core(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
}

static inline bool dbell_bcast_subproc(target_ulong rb)
{
    return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
}

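/*
 * For the server forms, RB bits 57:63 carry the target thread identifier
 * (TIR) when the message is directed at a single thread of the same core
 * (msgsndp, and msgsnd prior to POWER9).
 */
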
/*
 * Send an interrupt to a thread in the same core as env.
 */
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (ppc_cpu_lpar_single_threaded(cs)) {
        if (target_tir == 0) {
            ppc_set_irq(cpu, irq, 1);
        }
    } else {
        CPUState *ccs;

        /* Does iothread need to be locked for walking CPU list? */
        bql_lock();
        THREAD_SIBLING_FOREACH(cs, ccs) {
            PowerPCCPU *ccpu = POWERPC_CPU(ccs);
            if (target_tir == ppc_cpu_tir(ccpu)) {
                ppc_set_irq(ccpu, irq, 1);
                break;
            }
        }
        bql_unlock();
    }
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}

void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;
    bool brdcast = false;
    CPUState *cs, *ccs;
    PowerPCCPU *cpu;

    if (!dbell_type_server(rb)) {
        return;
    }

    /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
    if (!(env->insns_flags2 & PPC2_ISA300)) {
        msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
        return;
    }

    /* POWER9 and later msgsnd is global (targets any thread) */
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        return;
    }
    cs = CPU(cpu);

    if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
                                 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
        brdcast = true;
    }

    if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
        return;
    }

    /*
     * Why is bql needed for walking CPU list? Answer seems to be because ppc
     * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
     * so could this be removed?
     */
    bql_lock();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
    }
    bql_unlock();
}

#ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}

/*
 * Sends a message to another thread on the same
 * multi-threaded processor.
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
    uint32_t error_code = 0;

    if (env->insns_flags2 & PPC2_ISA207S) {
        /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
        env->spr[SPR_POWER_SIAR] = prev_ip;
        error_code = PPC_BIT(33);
    }
    raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
#endif /* !CONFIG_USER_ONLY */