xref: /qemu/target/ppc/tcg-excp_helper.c (revision 51209c2aed343a45f79b522706c807abbdcf01a3)
1  /*
2   *  PowerPC exception emulation helpers for QEMU (TCG specific)
3   *
4   *  Copyright (c) 2003-2007 Jocelyn Mayer
5   *
6   * This library is free software; you can redistribute it and/or
7   * modify it under the terms of the GNU Lesser General Public
8   * License as published by the Free Software Foundation; either
9   * version 2.1 of the License, or (at your option) any later version.
10   *
11   * This library is distributed in the hope that it will be useful,
12   * but WITHOUT ANY WARRANTY; without even the implied warranty of
13   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   * Lesser General Public License for more details.
15   *
16   * You should have received a copy of the GNU Lesser General Public
17   * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #include "qemu/osdep.h"
20  #include "qemu/main-loop.h"
21  #include "qemu/log.h"
22  #include "target/ppc/cpu.h"
23  #include "accel/tcg/cpu-ldst.h"
24  #include "exec/exec-all.h"
25  #include "exec/helper-proto.h"
26  #include "system/runstate.h"
27  
28  #include "helper_regs.h"
29  #include "hw/ppc/ppc.h"
30  #include "internal.h"
31  #include "cpu.h"
32  #include "trace.h"
33  
34  /*****************************************************************************/
35  /* Exceptions processing helpers */
36  
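/*
 * 'raddr' is the host return address of the caller (usually GETPC());
 * cpu_loop_exit_restore() uses it to restore the guest CPU state before
 * the exception is delivered. A zero raddr skips the unwind.
 */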
37  void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
38                              uint32_t error_code, uintptr_t raddr)
39  {
40      CPUState *cs = env_cpu(env);
41  
42      cs->exception_index = exception;
43      env->error_code = error_code;
44      cpu_loop_exit_restore(cs, raddr);
45  }
46  
47  void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
48                                  uint32_t error_code)
49  {
50      raise_exception_err_ra(env, exception, error_code, 0);
51  }
52  
53  void helper_raise_exception(CPUPPCState *env, uint32_t exception)
54  {
55      raise_exception_err_ra(env, exception, 0, 0);
56  }
57  
58  #ifndef CONFIG_USER_ONLY
59  
60  static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
61                                             uint32_t error_code)
62  {
63      raise_exception_err_ra(env, exception, error_code, 0);
64  }
65  
66  static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
67  {
68      raise_exception_err_ra(env, exception, 0, 0);
69  }
70  
71  #endif /* !CONFIG_USER_ONLY */
72  
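/*
 * tw/twi trap word: 'flags' is the TO field; trap if any enabled condition
 * holds: 0x10 signed <, 0x08 signed >, 0x04 ==, 0x02 unsigned <,
 * 0x01 unsigned >.
 */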
73  void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
74                 uint32_t flags)
75  {
76      if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
77                    ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
78                    ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
79                    ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
80                    ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
81          raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
82                                 POWERPC_EXCP_TRAP, GETPC());
83      }
84  }
85  
86  #ifdef TARGET_PPC64
87  void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
88                 uint32_t flags)
89  {
90      if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
91                    ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
92                    ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
93                    ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
94                    ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
95          raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
96                                 POWERPC_EXCP_TRAP, GETPC());
97      }
98  }
99  #endif /* TARGET_PPC64 */
100  
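/*
 * One 32-bit lane of the SIMON-like hash used by the hashst/hashchk family
 * (ROP protection): expand 'key' into 32 16-bit round keys, rotate the
 * schedule by 'lane', then run 32 Feistel rounds over 'x'.
 */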
101  static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
102  {
103      const uint16_t c = 0xfffc;
104      const uint64_t z0 = 0xfa2561cdf44ac398ULL;
105      uint16_t z = 0, temp;
106      uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
107  
108      for (int i = 3; i >= 0; i--) {
109          k[i] = key & 0xffff;
110          key >>= 16;
111      }
112      xleft[0] = x & 0xffff;
113      xright[0] = (x >> 16) & 0xffff;
114  
115      for (int i = 0; i < 28; i++) {
116          z = (z0 >> (63 - i)) & 1;
117          temp = ror16(k[i + 3], 3) ^ k[i + 1];
118          k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
119      }
120  
121      for (int i = 0; i < 8; i++) {
122          eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
123          eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
124          eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
125          eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
126      }
127  
128      for (int i = 0; i < 32; i++) {
129          fxleft[i] = (rol16(xleft[i], 1) &
130              rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
131          xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
132          xright[i + 1] = xleft[i];
133      }
134  
135      return (((uint32_t)xright[32]) << 16) | xleft[32];
136  }
137  
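/*
 * Build the 64-bit digest of (ra, rb): interleave their bytes into two
 * 64-bit stage-0 values, run the SIMON-like function over each 32-bit half
 * with a different lane, and XOR the two results together.
 */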
138  static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
139  {
140      uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
141      uint64_t stage1_h, stage1_l;
142  
143      for (int i = 0; i < 4; i++) {
144          stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
145          stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
146          stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
147          stage0_l |= (ra & 0xff) << (8 * 2 * i);
148          rb >>= 8;
149          ra >>= 8;
150      }
151  
152      stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
153      stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
154      stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
155      stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
156  
157      return stage1_h ^ stage1_l;
158  }
159  
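/*
 * Common implementation of the hash helpers: 'store' selects hashst-style
 * behaviour (write the digest to EA), otherwise hashchk-style behaviour
 * (reload the digest from EA and raise a trap-type program interrupt on
 * mismatch).
 */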
160  static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
161                      target_ulong rb, uint64_t key, bool store)
162  {
163      uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
164  
165      if (store) {
166          cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
167      } else {
168          loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
169          if (loaded_hash != calculated_hash) {
170              raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
171                  POWERPC_EXCP_TRAP, GETPC());
172          }
173      }
174  }
175  
176  #include "qemu/guest-random.h"
177  
178  #ifdef TARGET_PPC64
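/*
 * The hash instructions are no-ops unless the relevant DEXCR/HDEXCR aspect
 * is enabled for the current privilege level: PRO (or the hypervisor ENF
 * override) in problem state, PNH (or ENF) in privileged non-hypervisor
 * state, and the HDEXCR HNU aspect when MSR[HV]=1 and MSR[S]=0.
 */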
179  #define HELPER_HASH(op, key, store, dexcr_aspect)                             \
180  void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
181                   target_ulong rb)                                             \
182  {                                                                             \
183      if (env->msr & R_MSR_PR_MASK) {                                           \
184          if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK ||      \
185              env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
186              return;                                                           \
187      } else if (!(env->msr & R_MSR_HV_MASK)) {                                 \
188          if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK ||      \
189              env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
190              return;                                                           \
191      } else if (!(env->msr & R_MSR_S_MASK)) {                                  \
192          if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK))     \
193              return;                                                           \
194      }                                                                         \
195                                                                                \
196      do_hash(env, ea, ra, rb, key, store);                                     \
197  }
198  #else
199  #define HELPER_HASH(op, key, store, dexcr_aspect)                             \
200  void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
201                   target_ulong rb)                                             \
202  {                                                                             \
203      do_hash(env, ea, ra, rb, key, store);                                     \
204  }
205  #endif /* TARGET_PPC64 */
206  
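/*
 * HASHST/HASHCHK use the problem-state key (HASHKEYR) and the NPHIE aspect;
 * HASHSTP/HASHCHKP use the privileged key (HASHPKEYR) and PHIE.
 */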
207  HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
208  HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
209  HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
210  HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
211  
212  #ifndef CONFIG_USER_ONLY
213  
214  void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
215                                   MMUAccessType access_type,
216                                   int mmu_idx, uintptr_t retaddr)
217  {
218      CPUPPCState *env = cpu_env(cs);
219      uint32_t insn;
220  
221      /* Restore state and reload the insn we executed, for filling in DSISR.  */
222      cpu_restore_state(cs, retaddr);
223      insn = ppc_ldl_code(env, env->nip);
224  
225      switch (env->mmu_model) {
226      case POWERPC_MMU_SOFT_4xx:
227          env->spr[SPR_40x_DEAR] = vaddr;
228          break;
229      case POWERPC_MMU_BOOKE:
230      case POWERPC_MMU_BOOKE206:
231          env->spr[SPR_BOOKE_DEAR] = vaddr;
232          break;
233      default:
234          env->spr[SPR_DAR] = vaddr;
235          break;
236      }
237  
238      cs->exception_index = POWERPC_EXCP_ALIGN;
239      env->error_code = insn & 0x03FF0000;
240      cpu_loop_exit(cs);
241  }
242  
243  void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
244                                     vaddr vaddr, unsigned size,
245                                     MMUAccessType access_type,
246                                     int mmu_idx, MemTxAttrs attrs,
247                                     MemTxResult response, uintptr_t retaddr)
248  {
249      CPUPPCState *env = cpu_env(cs);
250  
251      switch (env->excp_model) {
252  #if defined(TARGET_PPC64)
253      case POWERPC_EXCP_POWER8:
254      case POWERPC_EXCP_POWER9:
255      case POWERPC_EXCP_POWER10:
256      case POWERPC_EXCP_POWER11:
257          /*
258           * Machine check codes can be found in processor User Manual or
259           * Linux or skiboot source.
260           */
261          if (access_type == MMU_DATA_LOAD) {
262              env->spr[SPR_DAR] = vaddr;
263              env->spr[SPR_DSISR] = PPC_BIT(57);
264              env->error_code = PPC_BIT(42);
265  
266          } else if (access_type == MMU_DATA_STORE) {
267              /*
268               * MCE for stores in POWER is asynchronous so hardware does
269               * not set DAR, but QEMU can do better.
270               */
271              env->spr[SPR_DAR] = vaddr;
272              env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
273              env->error_code |= PPC_BIT(42);
274  
275          } else { /* Fetch */
276              /*
277               * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
278               * the instruction, so that must always be clear for fetches.
279               */
280              env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
281          }
282          break;
283  #endif
284      default:
285          /*
286           * TODO: Check behaviour for other CPUs, for now do nothing.
287           * Could add a basic MCE even if real hardware ignores.
288           */
289          return;
290      }
291  
292      cs->exception_index = POWERPC_EXCP_MCHECK;
293      cpu_loop_exit_restore(cs, retaddr);
294  }
295  
296  void ppc_cpu_debug_excp_handler(CPUState *cs)
297  {
298  #if defined(TARGET_PPC64)
299      CPUPPCState *env = cpu_env(cs);
300  
301      if (env->insns_flags2 & PPC2_ISA207S) {
302          if (cs->watchpoint_hit) {
303              if (cs->watchpoint_hit->flags & BP_CPU) {
304                  env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
305                  env->spr[SPR_DSISR] = PPC_BIT(41);
306                  cs->watchpoint_hit = NULL;
307                  raise_exception(env, POWERPC_EXCP_DSI);
308              }
309              cs->watchpoint_hit = NULL;
310          } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
311              raise_exception_err(env, POWERPC_EXCP_TRACE,
312                                  PPC_BIT(33) | PPC_BIT(43));
313          }
314      }
315  #endif
316  }
317  
318  bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
319  {
320  #if defined(TARGET_PPC64)
321      CPUPPCState *env = cpu_env(cs);
322  
323      if (env->insns_flags2 & PPC2_ISA207S) {
324          target_ulong priv;
325  
326          priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
327          switch (priv) {
328          case 0x1: /* problem */
329              return env->msr & ((target_ulong)1 << MSR_PR);
330          case 0x2: /* supervisor */
331              return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
332                      !(env->msr & ((target_ulong)1 << MSR_HV)));
333          case 0x3: /* hypervisor */
334              return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
335                       (env->msr & ((target_ulong)1 << MSR_HV)));
336          default:
337              g_assert_not_reached();
338          }
339      }
340  #endif
341  
342      return false;
343  }
344  
345  bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
346  {
347  #if defined(TARGET_PPC64)
348      CPUPPCState *env = cpu_env(cs);
349      bool wt, wti, hv, sv, pr;
350      uint32_t dawrx;
351  
352      if ((env->insns_flags2 & PPC2_ISA207S) &&
353          (wp == env->dawr_watchpoint[0])) {
354          dawrx = env->spr[SPR_DAWRX0];
355      } else if ((env->insns_flags2 & PPC2_ISA310) &&
356                 (wp == env->dawr_watchpoint[1])) {
357          dawrx = env->spr[SPR_DAWRX1];
358      } else {
359          return false;
360      }
361  
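    /*
     * DAWRXn fields (big-endian bit numbers): WT = 59, WTI = 60 and the
     * PRIVM mask HYP/PNH/PRO in bits 61:63.
     */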
362      wt = extract32(dawrx, PPC_BIT_NR(59), 1);
363      wti = extract32(dawrx, PPC_BIT_NR(60), 1);
364      hv = extract32(dawrx, PPC_BIT_NR(61), 1);
365      sv = extract32(dawrx, PPC_BIT_NR(62), 1);
366      pr = extract32(dawrx, PPC_BIT_NR(63), 1);
367  
368      if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
369          return false;
370      } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
371          return false;
372      } else if (!sv) {
373          return false;
374      }
375  
376      if (!wti) {
377          if (env->msr & ((target_ulong)1 << MSR_DR)) {
378              return wt;
379          } else {
380              return !wt;
381          }
382      }
383  
384      return true;
385  #endif
386  
387      return false;
388  }
389  
390  /*
391   * This stops the machine and logs CPU state without killing QEMU (like
392   * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
393   * so the machine can still be debugged.
394   */
395  G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
396  {
397      CPUState *cs = env_cpu(env);
398      FILE *f;
399  
400      f = qemu_log_trylock();
401      if (f) {
402          fprintf(f, "Entering checkstop state: %s\n", reason);
403          cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
404          qemu_log_unlock(f);
405      }
406  
407      /*
408       * This stops the machine and logs CPU state without killing QEMU
409       * (like cpu_abort()) so the machine can still be debugged (because
410       * it is often a guest error).
411       */
412      qemu_system_guest_panicked(NULL);
413      cpu_loop_exit_noexc(cs);
414  }
415  
416  /* Return true iff byteswap is needed to load instruction */
417  static inline bool insn_need_byteswap(CPUArchState *env)
418  {
419      /* System-mode builds are TARGET_BIG_ENDIAN; swap when MSR[LE] is set */
420      return !!(env->msr & ((target_ulong)1 << MSR_LE));
421  }
422  
423  uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
424  {
425      uint32_t insn = cpu_ldl_code(env, addr);
426  
427      if (insn_need_byteswap(env)) {
428          insn = bswap32(insn);
429      }
430  
431      return insn;
432  }
433  
434  #if defined(TARGET_PPC64)
435  void helper_attn(CPUPPCState *env)
436  {
437      /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
438      if ((*env->check_attn)(env)) {
439          powerpc_checkstop(env, "host executed attn");
440      } else {
441          raise_exception_err(env, POWERPC_EXCP_HV_EMU,
442                              POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
443      }
444  }
445  
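/*
 * scv is gated by FSCR[SCV]: when the facility is disabled, raise a
 * facility unavailable interrupt instead of a system call vectored.
 */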
446  void helper_scv(CPUPPCState *env, uint32_t lev)
447  {
448      if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
449          raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
450      } else {
451          raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
452      }
453  }
454  
455  void helper_pminsn(CPUPPCState *env, uint32_t insn)
456  {
457      CPUState *cs = env_cpu(env);
458  
459      cs->halted = 1;
460  
461      /* Condition for waking up at 0x100 */
462      env->resume_as_sreset = (insn != PPC_PM_STOP) ||
463          (env->spr[SPR_PSSCR] & PSSCR_EC);
464  
465      /* HDECR does not wake from PM state; clear it, it may have already fired */
466      if (env->resume_as_sreset) {
467          PowerPCCPU *cpu = env_archcpu(env);
468          ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
469      }
470  
471      ppc_maybe_interrupt(env);
472  }
473  
474  #endif /* TARGET_PPC64 */
475  void helper_store_msr(CPUPPCState *env, target_ulong val)
476  {
477      uint32_t excp = hreg_store_msr(env, val, 0);
478  
479      if (excp != 0) {
480          cpu_interrupt_exittb(env_cpu(env));
481          raise_exception(env, excp);
482      }
483  }
484  
485  void helper_ppc_maybe_interrupt(CPUPPCState *env)
486  {
487      ppc_maybe_interrupt(env);
488  }
489  
490  static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
491  {
492      /* MSR:POW cannot be set by any form of rfi */
493      msr &= ~(1ULL << MSR_POW);
494  
495      /* MSR:TGPR cannot be set by any form of rfi */
496      if (env->flags & POWERPC_FLAG_TGPR) {
497          msr &= ~(1ULL << MSR_TGPR);
498      }
499  
500  #ifdef TARGET_PPC64
501      /* Switching to 32-bit? Crop the nip */
502      if (!msr_is_64bit(env, msr)) {
503          nip = (uint32_t)nip;
504      }
505  #else
506      nip = (uint32_t)nip;
507  #endif
508      /* XXX: this alignment masking is wrong if VLE is supported */
509      env->nip = nip & ~((target_ulong)0x00000003);
510      hreg_store_msr(env, msr, 1);
511      trace_ppc_excp_rfi(env->nip, env->msr);
512      /*
513       * No need to raise an exception here, as rfi is always the last
514       * insn of a TB
515       */
516      cpu_interrupt_exittb(env_cpu(env));
517      /* Reset the reservation */
518      env->reserve_addr = -1;
519  
520      /* Context synchronizing: check if TCG TLB needs flush */
521      check_tlb_flush(env, false);
522  }
523  
524  void helper_rfi(CPUPPCState *env)
525  {
526      do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
527  }
528  
529  #ifdef TARGET_PPC64
530  void helper_rfid(CPUPPCState *env)
531  {
532      /*
533       * The architecture defines a number of rules for which bits can
534       * change but in practice, we handle this in hreg_store_msr()
535       * which will be called by do_rfi(), so there is no need to filter
536       * here
537       */
538      do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
539  }
540  
541  void helper_rfscv(CPUPPCState *env)
542  {
543      do_rfi(env, env->lr, env->ctr);
544  }
545  
546  void helper_hrfid(CPUPPCState *env)
547  {
548      do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
549  }
550  
551  void helper_rfebb(CPUPPCState *env, target_ulong s)
552  {
553      target_ulong msr = env->msr;
554  
555      /*
556       * Handling of BESCR bits 32:33 according to PowerISA v3.1:
557       *
558       * "If BESCR 32:33 != 0b00 the instruction is treated as if
559       *  the instruction form were invalid."
560       */
561      if (env->spr[SPR_BESCR] & BESCR_INVALID) {
562          raise_exception_err(env, POWERPC_EXCP_PROGRAM,
563                              POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
564      }
565  
566      env->nip = env->spr[SPR_EBBRR];
567  
568      /* Switching to 32-bit? Crop the nip */
569      if (!msr_is_64bit(env, msr)) {
570          env->nip = (uint32_t)env->spr[SPR_EBBRR];
571      }
572  
573      if (s) {
574          env->spr[SPR_BESCR] |= BESCR_GE;
575      } else {
576          env->spr[SPR_BESCR] &= ~BESCR_GE;
577      }
578  }
579  
580  /*
581   * Triggers or queues an 'ebb_excp' EBB exception. All checks
582   * but FSCR, HFSCR and msr_pr must be done beforehand.
583   *
584   * PowerISA v3.1 isn't clear about whether an EBB should be
585   * postponed or cancelled if the EBB facility is unavailable.
586   * Our assumption here is that the EBB is cancelled if both
587   * FSCR and HFSCR EBB facilities aren't available.
588   */
589  static void do_ebb(CPUPPCState *env, int ebb_excp)
590  {
591      PowerPCCPU *cpu = env_archcpu(env);
592  
593      /*
594       * FSCR_EBB and FSCR_IC_EBB are the same bits used with
595       * HFSCR.
596       */
597      helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
598      helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
599  
600      if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
601          env->spr[SPR_BESCR] |= BESCR_PMEO;
602      } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
603          env->spr[SPR_BESCR] |= BESCR_EEO;
604      }
605  
606      if (FIELD_EX64(env->msr, MSR, PR)) {
607          powerpc_excp(cpu, ebb_excp);
608      } else {
609          ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
610      }
611  }
612  
613  void raise_ebb_perfm_exception(CPUPPCState *env)
614  {
615      bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
616                               env->spr[SPR_BESCR] & BESCR_PME &&
617                               env->spr[SPR_BESCR] & BESCR_GE;
618  
619      if (!perfm_ebb_enabled) {
620          return;
621      }
622  
623      do_ebb(env, POWERPC_EXCP_PERFM_EBB);
624  }
625  #endif /* TARGET_PPC64 */
626  
627  /*****************************************************************************/
628  /* Embedded PowerPC specific helpers */
629  void helper_40x_rfci(CPUPPCState *env)
630  {
631      do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
632  }
633  
634  void helper_rfci(CPUPPCState *env)
635  {
636      do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
637  }
638  
639  void helper_rfdi(CPUPPCState *env)
640  {
641      /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
642      do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
643  }
644  
645  void helper_rfmci(CPUPPCState *env)
646  {
647      /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
648      do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
649  }
650  
651  /* Embedded.Processor Control */
652  static int dbell2irq(target_ulong rb)
653  {
654      int msg = rb & DBELL_TYPE_MASK;
655      int irq = -1;
656  
657      switch (msg) {
658      case DBELL_TYPE_DBELL:
659          irq = PPC_INTERRUPT_DOORBELL;
660          break;
661      case DBELL_TYPE_DBELL_CRIT:
662          irq = PPC_INTERRUPT_CDOORBELL;
663          break;
664      case DBELL_TYPE_G_DBELL:
665      case DBELL_TYPE_G_DBELL_CRIT:
666      case DBELL_TYPE_G_DBELL_MC:
667          /* XXX implement */
668      default:
669          break;
670      }
671  
672      return irq;
673  }
674  
675  void helper_msgclr(CPUPPCState *env, target_ulong rb)
676  {
677      int irq = dbell2irq(rb);
678  
679      if (irq < 0) {
680          return;
681      }
682  
683      ppc_set_irq(env_archcpu(env), irq, 0);
684  }
685  
686  void helper_msgsnd(target_ulong rb)
687  {
688      int irq = dbell2irq(rb);
689      int pir = rb & DBELL_PIRTAG_MASK;
690      CPUState *cs;
691  
692      if (irq < 0) {
693          return;
694      }
695  
696      bql_lock();
697      CPU_FOREACH(cs) {
698          PowerPCCPU *cpu = POWERPC_CPU(cs);
699          CPUPPCState *cenv = &cpu->env;
700  
701          if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
702              ppc_set_irq(cpu, irq, 1);
703          }
704      }
705      bql_unlock();
706  }
707  
708  /* Server Processor Control */
709  
710  static bool dbell_type_server(target_ulong rb)
711  {
712      /*
713       * A Directed Hypervisor Doorbell message is sent only if the
714       * message type is 5. All other types are reserved and the
715       * instruction is a no-op
716       */
717      return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
718  }
719  
720  static inline bool dbell_bcast_core(target_ulong rb)
721  {
722      return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
723  }
724  
725  static inline bool dbell_bcast_subproc(target_ulong rb)
726  {
727      return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
728  }
729  
730  /*
731   * Send an interrupt to a thread in the same core as env.
732   */
733  static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
734  {
735      PowerPCCPU *cpu = env_archcpu(env);
736      CPUState *cs = env_cpu(env);
737  
738      if (ppc_cpu_lpar_single_threaded(cs)) {
739          if (target_tir == 0) {
740              ppc_set_irq(cpu, irq, 1);
741          }
742      } else {
743          CPUState *ccs;
744  
745          /* Does iothread need to be locked for walking CPU list? */
746          bql_lock();
747          THREAD_SIBLING_FOREACH(cs, ccs) {
748              PowerPCCPU *ccpu = POWERPC_CPU(ccs);
749              if (target_tir == ppc_cpu_tir(ccpu)) {
750                  ppc_set_irq(ccpu, irq, 1);
751                  break;
752              }
753          }
754          bql_unlock();
755      }
756  }
757  
758  void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
759  {
760      if (!dbell_type_server(rb)) {
761          return;
762      }
763  
764      ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
765  }
766  
767  void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
768  {
769      int pir = rb & DBELL_PROCIDTAG_MASK;
770      bool brdcast = false;
771      CPUState *cs, *ccs;
772      PowerPCCPU *cpu;
773  
774      if (!dbell_type_server(rb)) {
775          return;
776      }
777  
778      /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
779      if (!(env->insns_flags2 & PPC2_ISA300)) {
780          msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
781          return;
782      }
783  
784      /* POWER9 and later msgsnd is a global (targets any thread) */
785      cpu = ppc_get_vcpu_by_pir(pir);
786      if (!cpu) {
787          return;
788      }
789      cs = CPU(cpu);
790  
791      if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
792                                   (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
793          brdcast = true;
794      }
795  
796      if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
797          ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
798          return;
799      }
800  
801      /*
802       * Why is bql needed for walking CPU list? Answer seems to be because ppc
803       * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
804       * so could this be removed?
805       */
806      bql_lock();
807      THREAD_SIBLING_FOREACH(cs, ccs) {
808          ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
809      }
810      bql_unlock();
811  }
812  
813  #ifdef TARGET_PPC64
814  void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
815  {
816      helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
817  
818      if (!dbell_type_server(rb)) {
819          return;
820      }
821  
822      ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
823  }
824  
825  /*
826   * Send a message to another thread on the same
827   * multi-threaded processor.
828   */
829  void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
830  {
831      helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
832  
833      if (!dbell_type_server(rb)) {
834          return;
835      }
836  
837      msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
838  }
839  #endif /* TARGET_PPC64 */
840  
841  /* Single-step tracing */
842  void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
843  {
844      uint32_t error_code = 0;
845      if (env->insns_flags2 & PPC2_ISA207S) {
846          /* Load/store reporting (SRR1[35, 36] and SDAR) is not implemented. */
847          env->spr[SPR_POWER_SIAR] = prev_ip;
848          error_code = PPC_BIT(33);
849      }
850      raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
851  }
852  #endif /* !CONFIG_USER_ONLY */
853