/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/target_page.h"
#include "exec/watchpoint.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "system/address-spaces.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

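/*
 * Raise a program interrupt from TCG-generated code: unwind the current
 * translation block to recover the guest PSW, queue the program-interruption
 * code and leave the CPU loop. This never returns.
 */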
G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
                                           uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

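/*
 * Raise a data exception: the data-exception code (DXC) describes the cause.
 * The DXC is made available in the lowcore and, when AFP is enabled, also in
 * byte 2 of the floating-point-control register.
 */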
G_NORETURN void tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                        uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

G_NORETURN void tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                          uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
 * this is only for the atomic and relative long operations, for which we want
 * to raise a specification exception.
 */
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
{
    tcg_s390_program_interrupt(cpu_env(cs), PGM_SPECIFICATION, retaddr);
}

#if defined(CONFIG_USER_ONLY)

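/*
 * For user-only emulation there is nothing to deliver here; the pending
 * exception is handled by the cpu_loop() of the user-mode emulator.
 */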
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     * S390 only gives the page of the fault, not the exact address.
     * C.f. the construction of TEC in mmu_translate().
     */
    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
    cpu_loop_exit_restore(cs, retaddr);
}

void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
                            MMUAccessType access_type, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUS390XState *env = cpu_env(cs);
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
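    /*
     * A probing access only wants to know whether the translation would
     * succeed; the caller handles the failure itself, so no exception is
     * raised here.
     */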
    if (probe) {
        return false;
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
     * and so unwinding will not occur. However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

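/*
 * Deliver a program interrupt: store the old PSW and the interruption
 * parameters into the lowcore and load the program-new PSW from there.
 */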
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;
    bool set_trans_exc_code = false;
    bool advance = false;

    assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
           ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        /* advance already handled */
        break;
    case PGM_ASCE_TYPE:
    case PGM_REG_FIRST_TRANS:
    case PGM_REG_SEC_TRANS:
    case PGM_REG_THIRD_TRANS:
    case PGM_SEGMENT_TRANS:
    case PGM_PAGE_TRANS:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        break;
    case PGM_PROTECTION:
        assert(env->int_pgm_code == env->tlb_fill_exc);
        set_trans_exc_code = true;
        advance = true;
        break;
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        advance = true;
        break;
    }

    /* advance the PSW if our exception is not nullifying */
    if (advance) {
        env->psw.addr += ilen;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    if (set_trans_exc_code) {
        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

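/*
 * Deliver a supervisor-call interrupt: the old PSW points past the SVC
 * instruction, and the SVC number is made available in the lowcore.
 */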
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

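    /*
     * The conditions below are checked in a fixed order, so the first
     * pending and enabled subclass wins; this encodes the priority of
     * the external-interruption subclasses.
     */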
    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    s390_cpu_set_psw(env, mask, addr);
}

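/*
 * Extended machine-check save area: 32 vector registers of 16 bytes each,
 * padded to the architected 1 KiB size.
 */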
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    s390_cpu_set_psw(env, mask, addr);
}

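/*
 * Deliver all pending, deliverable interrupts, highest priority first.
 * Program and SVC exceptions raised by the TCG helpers arrive here with
 * cs->exception_index already set; everything else is polled below.
 */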
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

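/*
 * Called from the main loop when CPU_INTERRUPT_HARD is pending; returns
 * true if an interrupt was actually delivered.
 */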
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

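/*
 * PER storage-alteration events are implemented with BP_CPU watchpoints;
 * when one fires, record the PER event and re-execute the instruction
 * without watchpoints so that it can complete.
 */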
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    CPUS390XState *env = cpu_env(cs);
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just assume it is the
           current default ASC. This holds true except when the MVCP
           and MVCS instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code. A PER exception
         * will be triggered, it will call s390_cpu_set_psw which will
         * recompute the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    do_unaligned_access(cs, retaddr);
}

static G_NORETURN
void monitor_event(CPUS390XState *env,
                   uint64_t monitor_code,
                   uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

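/*
 * MONITOR CALL: the low 16 bits of CR8 hold the monitor masks, one bit per
 * class (class 0 at bit 15); a monitor event is only recognized when the
 * mask bit for the requested class is one.
 */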
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xf);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */