/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

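/*
 * Unwind the guest state to the insn at 'ra', log the event, queue the
 * program interruption and leave via cpu_loop_exit() -- never returns.
 */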
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

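/*
 * The data-exception code (DXC) is reported in the lowcore and, when the
 * AFP-register control (CR0 bit 45) is one, also in byte 1 of the FPC.
 */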
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore; without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC; without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * C.f. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

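/* Map a QEMU MMU index back onto the PSW address-space-control value. */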
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

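/*
 * Softmmu TLB fill handler: returns true once the translation has been
 * entered into the TLB, false if 'probe' is set and the page cannot be
 * accessed; otherwise a program interruption is raised and this does
 * not return.
 */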
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUS390XState *env = cpu_env(cs);
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

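/*
 * Present a program interruption: advance the PSW unless the exception
 * nullifies, store ILC, code and the old PSW (plus the TEC for
 * translation exceptions) into the lowcore, then load the pgm-new PSW.
 */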
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        /* advance already handled */
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

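/*
 * Present a SUPERVISOR CALL interruption: store the SVC number and ILC
 * into the lowcore along with the old PSW (pointing past the SVC insn),
 * then load the svc-new PSW.
 */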
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

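/*
 * Present the highest-priority pending external interruption that is
 * enabled by its CR0 subclass mask: emergency signal, external call,
 * clock comparator, CPU timer, then service signal.
 */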
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

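/*
 * Dequeue one I/O interruption enabled by CR6 from the FLIC and present
 * it: subchannel ID/number, interruption parameter and word go into the
 * lowcore before the I/O PSWs are swapped.
 */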
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

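/*
 * Machine-check extended save area: the 32 vector registers (512 bytes)
 * followed by padding up to the architected 1 KiB size.
 */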
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

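/*
 * Present a channel-report machine check: dequeue the CRW from the FLIC,
 * save the register state and the machine-check interruption code (MCIC)
 * into the lowcore, then swap the machine-check PSWs.
 */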
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

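/*
 * Deliver whatever is pending and deliverable, highest priority first:
 * machine check, external, I/O, restart, stop.  Program and SVC
 * interruptions arrive here with exception_index already set by TCG.
 */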
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

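/*
 * Entry point from the main TCG loop for CPU_INTERRUPT_HARD.  Returns
 * true if an interruption was delivered; nothing is delivered while the
 * target of an EXECUTE insn is still in flight.
 */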
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

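/*
 * Turn a watchpoint hit into a PER storage-alteration event: record the
 * PER address and code, then restart execution without the CPU
 * watchpoints so that the pending PER exception gets presented.
 */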
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC; this holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered; it will call s390_cpu_set_psw, which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

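/*
 * MONITOR CALL: the event is only recognized when the monitor-mask bit
 * for the requested class is set in the low 16 bits of CR8;
 * 0x8000 >> class selects that bit.
 */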
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */