xref: /qemu/target/s390x/tcg/excp_helper.c (revision 641f1c53862aec64810c0b93b5b1de49d55fda92)
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "exec/watchpoint.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "system/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

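/*
 * Raise a program interrupt: unwind to the guest state at the faulting
 * instruction (identified by the host return address @ra), log the event,
 * record the program-interruption code and leave the CPU loop.
 */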
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

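/*
 * Raise a data exception.  The data-exception code (DXC) is made
 * available in the lowcore and, when the AFP-register control is set
 * in CR0, also in byte 2 of the FPC.
 */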
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

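/*
 * Raise a vector-processing exception.  Unlike the DXC above, the VXC
 * is written to the FPC unconditionally.
 */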
G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * C.f. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

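/* Map a QEMU MMU index to the architectural address-space control (ASC). */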
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

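/*
 * TCG TLB-fill handler: translate @address for @mmu_idx and install the
 * mapping on success.  On a translation exception, either report failure
 * (when probing) or deliver the program interrupt reported by the MMU.
 */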
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUS390XState *env = cpu_env(cs);
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

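/*
 * Deliver the pending program interruption: decide whether the PSW must
 * be advanced past the instruction (non-nullifying conditions) and
 * whether a translation-exception code has to be stored, then exchange
 * the PSWs through the lowcore.
 */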
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        /* advance already handled */
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

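/* Deliver a SUPERVISOR CALL interruption via the SVC old/new PSWs. */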
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /*
     * When a PER event is pending, the PER exception has to happen
     * immediately after the SERVICE CALL one.
     */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

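/*
 * Deliver one external interruption.  The pending sources are checked in
 * priority order; each is only taken when its subclass is enabled by the
 * corresponding subclass-mask bit in CR0.
 */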
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

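/*
 * Deliver one I/O interruption, dequeued from the FLIC according to the
 * I/O-interruption subclass mask in CR6.
 */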
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

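/*
 * Machine-check extended save area: 32 vector registers of 16 bytes each,
 * padded to the architected 1KB size.
 */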
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

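/*
 * Deliver a machine-check interruption: store the register state into the
 * lowcore save areas (and the vector registers into the extended save
 * area, if one is available), then load the mcck-new PSW.
 */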
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

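/*
 * Deliver all currently deliverable interruptions, highest priority first:
 * machine check, external, I/O, restart, stop.  Program and supervisor-call
 * exceptions arrive here with cs->exception_index already set.
 */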
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

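/*
 * cpu_exec_interrupt() callback: deliver a pending hard interrupt, unless
 * we are in the middle of an EXECUTE, whose target must complete
 * indivisibly.  Returns true if an interrupt was delivered.
 */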
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /*
             * Execution of the target insn is indivisible from the
             * parent EXECUTE insn.
             */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /*
             * Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep.
             */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

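/*
 * Debug-exception handler: a watchpoint installed for PER
 * storage-alteration events has triggered.  Record the PER event and
 * re-execute the instruction so the exception is raised via the normal
 * program-interrupt path.
 */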
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /*
         * FIXME: When the storage-alteration-space control bit is set,
         * the exception should only be triggered if the memory access
         * is done using an address space with the storage-alteration-event
         * bit set.  We have no way to detect that with the current
         * watchpoint code.
         */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /*
         * FIXME: We currently have no way to detect the address space used
         * to trigger the watchpoint.  For now just assume it is the
         * current default ASC.  This turns out to be true except when the
         * MVCP and MVCS instructions are used.
         */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

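/*
 * Raise a monitor event: make the monitor code and class number available
 * in the lowcore, then raise PGM_MONITOR.
 */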
static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

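/*
 * MONITOR CALL: the event is raised only when the monitor-mask bit for
 * @monitor_class is set in CR8 (the masks occupy bits 48-63 of the
 * control register, class 0 first).
 */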
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */