xref: /qemu/target/s390x/tcg/misc_helper.c (revision 513823e7521a09ed7ad1e32e6454bac3b2cbf52d)
1 /*
2  *  S/390 misc helper routines
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2009 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qemu/cutils.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "s390x-internal.h"
26 #include "qemu/host-utils.h"
27 #include "exec/helper-proto.h"
28 #include "qemu/timer.h"
29 #include "exec/exec-all.h"
30 #include "exec/cpu_ldst.h"
31 #include "qapi/error.h"
32 #include "tcg_s390x.h"
33 #include "s390-tod.h"
34 
35 #if !defined(CONFIG_USER_ONLY)
36 #include "system/cpus.h"
37 #include "system/system.h"
38 #include "hw/s390x/ebcdic.h"
39 #include "hw/s390x/s390-hypercall.h"
40 #include "hw/s390x/sclp.h"
41 #include "hw/s390x/s390_flic.h"
42 #include "hw/s390x/ioinst.h"
43 #include "hw/s390x/s390-pci-inst.h"
44 #include "hw/boards.h"
45 #include "hw/s390x/tod.h"
46 #include CONFIG_DEVICES
47 #endif
48 
49 /* #define DEBUG_HELPER */
50 #ifdef DEBUG_HELPER
51 #define HELPER_LOG(x...) qemu_log(x)
52 #else
53 #define HELPER_LOG(x...)
54 #endif
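
/*
 * Minimal usage sketch: with DEBUG_HELPER defined above, HELPER_LOG()
 * forwards its printf-style arguments to qemu_log(); otherwise it expands
 * to nothing and the arguments are not even evaluated, e.g.:
 *
 *     HELPER_LOG("%s: exception %d\n", __func__, excp);
 */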
55 
56 /* Raise an exception statically from a TB.  */
57 void HELPER(exception)(CPUS390XState *env, uint32_t excp)
58 {
59     CPUState *cs = env_cpu(env);
60 
61     HELPER_LOG("%s: exception %d\n", __func__, excp);
62     cs->exception_index = excp;
63     cpu_loop_exit(cs);
64 }
65 
66 /* Store CPU Timer (also used for EXTRACT CPU TIME) */
67 uint64_t HELPER(stpt)(CPUS390XState *env)
68 {
69 #if defined(CONFIG_USER_ONLY)
70     /*
71      * Fake a descending CPU timer. We could get negative values here,
72      * but we don't care, as it is up to the OS to decide when to process
73      * that interrupt and reset the timer to > 0.
74      */
75     return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
76 #else
77     return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
78 #endif
79 }
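
/*
 * A rough sketch of the conversion used above (the actual helpers live in
 * s390-tod.h): TOD-clock bit 51 is incremented once per microsecond, so one
 * TOD unit is 2^-12 microseconds and, ignoring the bits lost to the shifts,
 *
 *     time2tod(ns)  ~= ns * 4096 / 1000 = (ns << 9) / 125
 *     tod2time(tod) ~= tod * 1000 / 4096 = (tod >> 9) * 125
 */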
80 
81 /* Store Clock */
82 uint64_t HELPER(stck)(CPUS390XState *env)
83 {
84 #ifdef CONFIG_USER_ONLY
85     struct timespec ts;
86     uint64_t ns;
87 
88     clock_gettime(CLOCK_REALTIME, &ts);
89     ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;
90 
91     return TOD_UNIX_EPOCH + time2tod(ns);
92 #else
93     S390TODState *td = s390_get_todstate();
94     S390TODClass *tdc = S390_TOD_GET_CLASS(td);
95     S390TOD tod;
96 
97     tdc->get(td, &tod, &error_abort);
98     return tod.low;
99 #endif
100 }
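
/*
 * The user-only path above rebases CLOCK_REALTIME (nanoseconds since the
 * Unix epoch, 1970-01-01) onto the s390 TOD epoch, which begins at
 * 1900-01-01 00:00 UTC: TOD_UNIX_EPOCH is the TOD value of the Unix epoch,
 * so TOD_UNIX_EPOCH + time2tod(ns) yields a plausible clock value.
 */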
101 
102 #ifndef CONFIG_USER_ONLY
103 /* SCLP service call */
104 uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
105 {
106     bql_lock();
107     int r = sclp_service_call(env_archcpu(env), r1, r2);
108     bql_unlock();
109     if (r < 0) {
110         tcg_s390_program_interrupt(env, -r, GETPC());
111     }
112     return r;
113 }
114 
115 void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
116 {
117     uint64_t r;
118 
119     switch (num) {
120 #ifdef CONFIG_S390_CCW_VIRTIO
121     case 0x500:
122         /* QEMU/KVM hypercall */
123         bql_lock();
124         handle_diag_500(env_archcpu(env), GETPC());
125         bql_unlock();
126         r = 0;
127         break;
128 #endif /* CONFIG_S390_CCW_VIRTIO */
129     case 0x44:
130         /* yield */
131         r = 0;
132         break;
133     case 0x308:
134         /* ipl */
135         bql_lock();
136         handle_diag_308(env, r1, r3, GETPC());
137         bql_unlock();
138         r = 0;
139         break;
140     case 0x288:
141         /* time bomb (watchdog) */
142         r = handle_diag_288(env, r1, r3);
143         break;
144     default:
145         r = -1;
146         break;
147     }
148 
149     if (r) {
150         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
151     }
152 }
153 
154 /* Set Prefix */
155 void HELPER(spx)(CPUS390XState *env, uint64_t a1)
156 {
157     const uint32_t prefix = a1 & 0x7fffe000;
158     const uint32_t old_prefix = env->psa;
159     CPUState *cs = env_cpu(env);
160 
161     if (prefix == old_prefix) {
162         return;
163     }
164     /*
165      * Since the prefix is aligned to 8k and memory increments are a multiple
166      * of 8k, checking the first page is sufficient.
167      */
168     if (!mmu_absolute_addr_valid(prefix, true)) {
169         tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
170     }
171 
172     env->psa = prefix;
173     HELPER_LOG("prefix: %#x\n", prefix);
174     tlb_flush_page(cs, 0);
175     tlb_flush_page(cs, TARGET_PAGE_SIZE);
176     if (prefix != 0) {
177         tlb_flush_page(cs, prefix);
178         tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
179     }
180     if (old_prefix != 0) {
181         tlb_flush_page(cs, old_prefix);
182         tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
183     }
184 }
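
/*
 * Background for the page flushes above: prefixing swaps the 8k block at
 * real addresses 0x0-0x1fff with the 8k block starting at the prefix, so a
 * prefix change only affects the two pages at address 0 and the two pages
 * at the old and new prefix; flushing just those is sufficient. For
 * example, with prefix = 0x44000, real 0x0-0x1fff maps to absolute
 * 0x44000-0x45fff and real 0x44000-0x45fff maps to absolute 0x0-0x1fff.
 */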
185 
186 static void update_ckc_timer(CPUS390XState *env)
187 {
188     S390TODState *td = s390_get_todstate();
189     uint64_t time;
190 
191     /* stop the timer and remove pending CKC IRQs */
192     timer_del(env->tod_timer);
193     g_assert(bql_locked());
194     env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
195 
196     /* The TOD has to exceed the CKC; this can never happen if the CKC is all 1's. */
197     if (env->ckc == -1ULL) {
198         return;
199     }
200 
201     /* difference between origins */
202     time = env->ckc - td->base.low;
203 
204     /* nanoseconds */
205     time = tod2time(time);
206 
207     timer_mod(env->tod_timer, time);
208 }
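
/*
 * A clock-comparator external interrupt becomes pending once the TOD clock
 * exceeds the CKC. Roughly, with the TCG TOD modelled as
 * tod = base + time2tod(QEMU_CLOCK_VIRTUAL), the deadline in virtual
 * nanoseconds is tod2time(ckc - base), which is what is programmed into
 * env->tod_timer above.
 */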
209 
210 /* Set Clock Comparator */
211 void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
212 {
213     env->ckc = ckc;
214 
215     bql_lock();
216     update_ckc_timer(env);
217     bql_unlock();
218 }
219 
220 void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
221 {
222     update_ckc_timer(cpu_env(cs));
223 }
224 
225 /* Set Clock */
226 uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
227 {
228     S390TODState *td = s390_get_todstate();
229     S390TODClass *tdc = S390_TOD_GET_CLASS(td);
230     S390TOD tod = {
231         .high = 0,
232         .low = tod_low,
233     };
234 
235     bql_lock();
236     tdc->set(td, &tod, &error_abort);
237     bql_unlock();
238     return 0;
239 }
240 
241 /* Set TOD Programmable Field */
242 void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
243 {
244     uint32_t val = r0;
245 
246     if (val & 0xffff0000) {
247         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
248     }
249     env->todpr = val;
250 }
251 
252 /* Store Clock Comparator */
253 uint64_t HELPER(stckc)(CPUS390XState *env)
254 {
255     return env->ckc;
256 }
257 
258 /* Set CPU Timer */
259 void HELPER(spt)(CPUS390XState *env, uint64_t time)
260 {
261     if (time == -1ULL) {
262         return;
263     }
264 
265     /* nanoseconds */
266     time = tod2time(time);
267 
268     env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
269 
270     timer_mod(env->cpu_timer, env->cputm);
271 }
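
/*
 * The CPU timer counts down and raises a CPU-timer external interrupt when
 * it turns negative. Here it is modelled as an absolute QEMU_CLOCK_VIRTUAL
 * deadline, env->cputm = now + tod2time(value), so that STPT above can
 * recover the remaining time as time2tod(env->cputm - now).
 */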
272 
273 /* Store System Information */
274 uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
275 {
276     const uintptr_t ra = GETPC();
277     const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
278     const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
279     const MachineState *ms = MACHINE(qdev_get_machine());
280     uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
281     S390CPU *cpu = env_archcpu(env);
282     SysIB sysib = { };
283     int i, cc = 0;
284 
285     if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
286         /* invalid function code: no other checks are performed */
287         return 3;
288     }
289 
290     if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
291         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
292     }
293 
294     if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
295         /* query the current level: no further checks are performed */
296         env->regs[0] = STSI_R0_FC_LEVEL_3;
297         return 0;
298     }
299 
300     if (a0 & ~TARGET_PAGE_MASK) {
301         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
302     }
303 
304     /* count the cpus and split them into configured and reserved ones */
305     for (i = 0; i < ms->possible_cpus->len; i++) {
306         total_cpus++;
307         if (ms->possible_cpus->cpus[i].cpu) {
308             conf_cpus++;
309         } else {
310             reserved_cpus++;
311         }
312     }
313 
314     /*
315      * In theory, we could report Level 1 / Level 2 as current. However,
316      * the Linux kernel will detect this as running under LPAR and assume
317      * an SCLP linemode console (which is always present on LPAR, but not
318      * the default for QEMU); it will then not display boot messages,
319      * which makes booting a Linux kernel under TCG harder.
320      *
321      * For now we fake the same SMP configuration on all levels.
322      *
323      * TODO: We could later make the level configurable via the machine
324      *       and change defaults (linemode console) based on machine type
325      *       and accelerator.
326      */
327     switch (r0 & STSI_R0_FC_MASK) {
328     case STSI_R0_FC_LEVEL_1:
329         if ((sel1 == 1) && (sel2 == 1)) {
330             /* Basic Machine Configuration */
331             char type[5] = {};
332 
333             ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
334             /* same as machine type number in STORE CPU ID, but in EBCDIC */
335             snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
336             ebcdic_put(sysib.sysib_111.type, type, 4);
337             /* model number (not stored in STORE CPU ID for z/Architecture) */
338             ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
339             ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
340             ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
341         } else if ((sel1 == 2) && (sel2 == 1)) {
342             /* Basic Machine CPU */
343             ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
344             ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
345             sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
346         } else if ((sel1 == 2) && (sel2 == 2)) {
347             /* Basic Machine CPUs */
348             sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
349             sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
350             sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
351             sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
352         } else {
353             cc = 3;
354         }
355         break;
356     case STSI_R0_FC_LEVEL_2:
357         if ((sel1 == 2) && (sel2 == 1)) {
358             /* LPAR CPU */
359             ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
360             ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
361             sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
362         } else if ((sel1 == 2) && (sel2 == 2)) {
363             /* LPAR CPUs */
364             sysib.sysib_222.lcpuc = 0x80; /* dedicated */
365             sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
366             sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
367             sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
368             ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
369             sysib.sysib_222.caf = cpu_to_be32(1000);
370             sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
371         } else {
372             cc = 3;
373         }
374         break;
375     case STSI_R0_FC_LEVEL_3:
376         if ((sel1 == 2) && (sel2 == 2)) {
377             /* VM CPUs */
378             sysib.sysib_322.count = 1;
379             sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
380             sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
381             sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
382             sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
383             /* Linux kernel uses this to distinguish us from z/VM */
384             /* The Linux kernel uses this to distinguish us from z/VM */
385             sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */
386 
387             /* If our VM has a name, use the real name */
388             if (qemu_name) {
389                 memset(sysib.sysib_322.vm[0].name, 0x40,
390                        sizeof(sysib.sysib_322.vm[0].name));
391                 ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
392                            MIN(sizeof(sysib.sysib_322.vm[0].name),
393                                strlen(qemu_name)));
394                 strpadcpy((char *)sysib.sysib_322.ext_names[0],
395                           sizeof(sysib.sysib_322.ext_names[0]),
396                           qemu_name, '\0');
397 
398             } else {
399                 ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
400                 strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
401             }
402 
403             /* add the uuid */
404             memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
405                    sizeof(sysib.sysib_322.vm[0].uuid));
406         } else {
407             cc = 3;
408         }
409         break;
410     }
411 
412     if (cc == 0) {
413         if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
414             s390_cpu_virt_mem_handle_exc(cpu, ra);
415         }
416     }
417 
418     return cc;
419 }
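
/*
 * Quick reference for the blocks filled in above: the function code and the
 * two selectors pick a SYSIB "fc.sel1.sel2", i.e. 1.1.1 (basic machine
 * configuration), 1.2.1/1.2.2 (basic machine CPU/CPUs), 2.2.1/2.2.2
 * (LPAR CPU/CPUs) and 3.2.2 (VM CPUs); any other combination sets
 * condition code 3.
 */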
420 
421 uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
422                       uint32_t r3)
423 {
424     int cc;
425 
426     /* TODO: the BQL is needed to inject interrupts - push it further down */
427     bql_lock();
428     cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
429     bql_unlock();
430 
431     return cc;
432 }
433 #endif
434 
435 #ifndef CONFIG_USER_ONLY
436 void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
437 {
438     S390CPU *cpu = env_archcpu(env);
439     bql_lock();
440     ioinst_handle_xsch(cpu, r1, GETPC());
441     bql_unlock();
442 }
443 
444 void HELPER(csch)(CPUS390XState *env, uint64_t r1)
445 {
446     S390CPU *cpu = env_archcpu(env);
447     bql_lock();
448     ioinst_handle_csch(cpu, r1, GETPC());
449     bql_unlock();
450 }
451 
452 void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
453 {
454     S390CPU *cpu = env_archcpu(env);
455     bql_lock();
456     ioinst_handle_hsch(cpu, r1, GETPC());
457     bql_unlock();
458 }
459 
460 void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
461 {
462     S390CPU *cpu = env_archcpu(env);
463     bql_lock();
464     ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
465     bql_unlock();
466 }
467 
468 void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
469 {
470     S390CPU *cpu = env_archcpu(env);
471     bql_lock();
472     ioinst_handle_rchp(cpu, r1, GETPC());
473     bql_unlock();
474 }
475 
476 void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
477 {
478     S390CPU *cpu = env_archcpu(env);
479     bql_lock();
480     ioinst_handle_rsch(cpu, r1, GETPC());
481     bql_unlock();
482 }
483 
484 void HELPER(sal)(CPUS390XState *env, uint64_t r1)
485 {
486     S390CPU *cpu = env_archcpu(env);
487 
488     bql_lock();
489     ioinst_handle_sal(cpu, r1, GETPC());
490     bql_unlock();
491 }
492 
493 void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
494 {
495     S390CPU *cpu = env_archcpu(env);
496 
497     bql_lock();
498     ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
499     bql_unlock();
500 }
501 
502 void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
503 {
504     S390CPU *cpu = env_archcpu(env);
505     bql_lock();
506     ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
507     bql_unlock();
508 }
509 
510 void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
511 {
512     S390CPU *cpu = env_archcpu(env);
513 
514     bql_lock();
515     ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
516     bql_unlock();
517 }
518 
519 void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
520 {
521     S390CPU *cpu = env_archcpu(env);
522     bql_lock();
523     ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
524     bql_unlock();
525 }
526 
527 uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
528 {
529     const uintptr_t ra = GETPC();
530     S390CPU *cpu = env_archcpu(env);
531     QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
532     QEMUS390FlicIO *io = NULL;
533     LowCore *lowcore;
534 
535     if (addr & 0x3) {
536         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
537     }
538 
539     bql_lock();
540     io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
541     if (!io) {
542         bql_unlock();
543         return 0;
544     }
545 
546     if (addr) {
547         struct {
548             uint16_t id;
549             uint16_t nr;
550             uint32_t parm;
551         } intc = {
552             .id = cpu_to_be16(io->id),
553             .nr = cpu_to_be16(io->nr),
554             .parm = cpu_to_be32(io->parm),
555         };
556 
557         if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
558             /* writing failed, reinject and properly clean up */
559             s390_io_interrupt(io->id, io->nr, io->parm, io->word);
560             bql_unlock();
561             g_free(io);
562             s390_cpu_virt_mem_handle_exc(cpu, ra);
563             return 0;
564         }
565     } else {
566         /* no protection applies */
567         lowcore = cpu_map_lowcore(env);
568         lowcore->subchannel_id = cpu_to_be16(io->id);
569         lowcore->subchannel_nr = cpu_to_be16(io->nr);
570         lowcore->io_int_parm = cpu_to_be32(io->parm);
571         lowcore->io_int_word = cpu_to_be32(io->word);
572         cpu_unmap_lowcore(lowcore);
573     }
574 
575     g_free(io);
576     bql_unlock();
577     return 1;
578 }
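
/*
 * In short: TEST PENDING INTERRUPTION returns 0 (cc 0) if no I/O
 * interruption enabled by CR6 is pending. Otherwise the subchannel ID,
 * subchannel number and interruption parameter are stored at the operand
 * address (or, for an operand of zero, in the lowcore together with the
 * I/O interruption word) and 1 (cc 1) is returned.
 */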
579 
580 void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
581 {
582     S390CPU *cpu = env_archcpu(env);
583     bql_lock();
584     ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
585     bql_unlock();
586 }
587 
588 void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
589 {
590     S390CPU *cpu = env_archcpu(env);
591     bql_lock();
592     ioinst_handle_chsc(cpu, inst >> 16, GETPC());
593     bql_unlock();
594 }
595 #endif
596 
597 #ifndef CONFIG_USER_ONLY
598 static G_NORETURN void per_raise_exception(CPUS390XState *env)
599 {
600     trigger_pgm_exception(env, PGM_PER);
601     cpu_loop_exit(env_cpu(env));
602 }
603 
604 static G_NORETURN void per_raise_exception_log(CPUS390XState *env)
605 {
606     qemu_log_mask(CPU_LOG_INT, "PER interrupt after 0x%" PRIx64 "\n",
607                   env->per_address);
608     per_raise_exception(env);
609 }
610 
611 void HELPER(per_check_exception)(CPUS390XState *env)
612 {
613     /* psw_addr, per_address and int_pgm_ilen are already set. */
614     if (unlikely(env->per_perc_atmid)) {
615         per_raise_exception_log(env);
616     }
617 }
618 
619 /* Check if an address is within the range delimited by the PER starting
620    address and the PER ending address.  The range might wrap around.  */
621 static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
622 {
623     if (env->cregs[10] <= env->cregs[11]) {
624         return env->cregs[10] <= addr && addr <= env->cregs[11];
625     } else {
626         return env->cregs[10] <= addr || addr <= env->cregs[11];
627     }
628 }
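
/*
 * Example of the wrap-around case handled above: with CR10 = 0xf000 and
 * CR11 = 0x1000 the range loops through the top of the address space, so
 * 0xf800 (>= CR10) and 0x800 (<= CR11) are both in range while 0x8000 is
 * not.
 */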
629 
630 void HELPER(per_branch)(CPUS390XState *env, uint64_t dest, uint32_t ilen)
631 {
632     if ((env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
633         && !get_per_in_range(env, dest)) {
634         return;
635     }
636 
637     env->psw.addr = dest;
638     env->int_pgm_ilen = ilen;
639     env->per_address = env->gbea;
640     env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
641     per_raise_exception_log(env);
642 }
643 
644 void HELPER(per_ifetch)(CPUS390XState *env, uint32_t ilen)
645 {
646     if (get_per_in_range(env, env->psw.addr)) {
647         env->per_address = env->psw.addr;
648         env->int_pgm_ilen = ilen;
649         env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
650 
651         /* If the instruction has to be nullified, trigger the
652            exception immediately. */
653         if (env->cregs[9] & PER_CR9_EVENT_IFETCH_NULLIFICATION) {
654             env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
655             qemu_log_mask(CPU_LOG_INT, "PER interrupt before 0x%" PRIx64 "\n",
656                           env->per_address);
657             per_raise_exception(env);
658         }
659     }
660 }
661 
662 void HELPER(per_store_real)(CPUS390XState *env, uint32_t ilen)
663 {
664     /* PSW is saved just before calling the helper.  */
665     env->per_address = env->psw.addr;
666     env->int_pgm_ilen = ilen;
667     env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
668     per_raise_exception_log(env);
669 }
670 #endif
671 
672 static uint8_t stfl_bytes[2048];
673 static unsigned int used_stfl_bytes;
674 
675 static void prepare_stfl(void)
676 {
677     static bool initialized;
678     int i;
679 
680     /* Racy, but we don't care: the same values are always written. */
681     if (initialized) {
682         return;
683     }
684 
685     s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
686     for (i = 0; i < sizeof(stfl_bytes); i++) {
687         if (stfl_bytes[i]) {
688             used_stfl_bytes = i + 1;
689         }
690     }
691     initialized = true;
692 }
693 
694 #ifndef CONFIG_USER_ONLY
695 void HELPER(stfl)(CPUS390XState *env)
696 {
697     LowCore *lowcore;
698 
699     lowcore = cpu_map_lowcore(env);
700     prepare_stfl();
701     memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
702     cpu_unmap_lowcore(lowcore);
703 }
704 #endif
705 
706 uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
707 {
708     const uintptr_t ra = GETPC();
709     const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
710     int max_bytes;
711     int i;
712 
713     if (addr & 0x7) {
714         tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
715     }
716 
717     prepare_stfl();
718     max_bytes = ROUND_UP(used_stfl_bytes, 8);
719 
720     /*
721      * The PoP says that doublewords beyond the highest-numbered facility
722      * bit may or may not be stored.  However, existing hardware appears
723      * not to store those doublewords, and existing software depends on that.
724      */
725     for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
726         cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
727     }
728 
729     env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
730     return count_bytes >= max_bytes ? 0 : 3;
731 }
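
/*
 * Worked example for the logic above, assuming three doublewords of
 * facility bits are in use (so max_bytes = 24): a guest issuing STFLE with
 * regs[0] & 0xff == 0 asks for one doubleword, so 8 bytes are stored, the
 * low byte of regs[0] is set to 2 (max_bytes / 8 - 1) and cc 3 tells the
 * guest to retry with a larger buffer; with regs[0] & 0xff >= 2 all 24
 * bytes fit and cc 0 is returned.
 */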
732 
733 #ifndef CONFIG_USER_ONLY
734 /*
735  * Note: we ignore any return code of the functions called for the PCI
736  * instructions, as the only time they return !0 is when the stub is
737  * called, and in that case we didn't even offer the zPCI facility.
738  * The only exception is SIC, where program checks need to be handled
739  * by the caller.
740  */
741 void HELPER(clp)(CPUS390XState *env, uint32_t r2)
742 {
743     S390CPU *cpu = env_archcpu(env);
744 
745     bql_lock();
746     clp_service_call(cpu, r2, GETPC());
747     bql_unlock();
748 }
749 
750 void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
751 {
752     S390CPU *cpu = env_archcpu(env);
753 
754     bql_lock();
755     pcilg_service_call(cpu, r1, r2, GETPC());
756     bql_unlock();
757 }
758 
759 void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
760 {
761     S390CPU *cpu = env_archcpu(env);
762 
763     bql_lock();
764     pcistg_service_call(cpu, r1, r2, GETPC());
765     bql_unlock();
766 }
767 
768 void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
769                      uint32_t ar)
770 {
771     S390CPU *cpu = env_archcpu(env);
772 
773     bql_lock();
774     stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
775     bql_unlock();
776 }
777 
778 void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
779 {
780     S390CPU *cpu = env_archcpu(env);
781     int r;
782 
783     bql_lock();
784     r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff);
785     bql_unlock();
786     /* css_do_sic() may actually return a PGM_xxx value to inject */
787     if (r) {
788         tcg_s390_program_interrupt(env, -r, GETPC());
789     }
790 }
791 
792 void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
793 {
794     S390CPU *cpu = env_archcpu(env);
795 
796     bql_lock();
797     rpcit_service_call(cpu, r1, r2, GETPC());
798     bql_unlock();
799 }
800 
801 void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
802                     uint64_t gaddr, uint32_t ar)
803 {
804     S390CPU *cpu = env_archcpu(env);
805 
806     bql_lock();
807     pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
808     bql_unlock();
809 }
810 
811 void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
812                     uint32_t ar)
813 {
814     S390CPU *cpu = env_archcpu(env);
815 
816     bql_lock();
817     mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
818     bql_unlock();
819 }
820 #endif
821