xref: /qemu/target/s390x/kvm/kvm.c (revision 1d7db85b61cb9888b8ed8c8923343b468405b7a0)
/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "kvm_s390x.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "hw/hw.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"

#ifndef DEBUG_KVM
#define DEBUG_KVM  0
#endif

#define DPRINTF(fmt, ...) do {                \
    if (DEBUG_KVM) {                          \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
    }                                         \
} while (0)

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
                           (max_cpus + NR_LOCAL_IRQS))

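/*
 * Scratch watchpoint used to report the address and flags of a hardware
 * watchpoint hit back to the common debug code (see the debug exit handler).
 */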
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;
static int cap_gs;
static int cap_hpage_1m;

static int active_cmma;

static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared);

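/* Query the current guest memory limit (KVM_S390_VM_MEM_LIMIT_SIZE) from KVM. */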
static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr  = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

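/*
 * Validate the page size of the configured mem-path backing: only 4k and 1M
 * pages are supported, and 1M pages additionally require KVM_CAP_S390_HPAGE_1M.
 */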
static int kvm_s390_configure_mempath_backing(KVMState *s)
{
    size_t path_psize = qemu_mempath_getpagesize(mem_path);

    if (path_psize == 4 * KiB) {
        return 0;
    }

    if (!hpage_1m_allowed()) {
        error_report("This QEMU machine does not support huge page "
                     "mappings");
        return -EINVAL;
    }

    if (path_psize != 1 * MiB) {
        error_report("Memory backing with 2G pages was specified, "
                     "but KVM does not support this memory backing");
        return -EINVAL;
    }

    if (kvm_vm_enable_cap(s, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_report("Memory backing with 1M pages was specified, "
                     "but KVM does not support this memory backing");
        return -EINVAL;
    }

    cap_hpage_1m = 1;
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (mem_path && kvm_s390_configure_mempath_backing(s)) {
        return -EINVAL;
    }

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (cpu_model_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) {
            cap_gs = 1;
        }
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, let's disable ais in the 2.10
     * machine.
     */
    /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */

    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /*
     * The initial reset call is needed here to reset in-kernel vcpu data
     * that we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl,
     * cpu_synchronize_state() is called in common kvm code (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

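/* Check whether KVM's synced register area provides all requested register sets. */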
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = get_freg(env, i)->ll;
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

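/*
 * Extended variant: the epoch index and the TOD value are read with a single
 * ioctl, so both values come from one snapshot of the guest TOD clock.
 */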
int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low  = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod  = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr:      the logical start address in guest memory
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
    }
    return ret;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of the data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared)
{
    static void *mem;

    if (mem) {
        /* we only support one allocation, which is enough for initial ram */
        return NULL;
    }

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (mem == MAP_FAILED) {
        mem = NULL;
    }
    if (mem && align) {
        *align = QEMU_VMALLOC_ALIGN;
    }
    return mem;
}

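/*
 * Instruction pattern and length used for software breakpoints; chosen once
 * in determine_sw_breakpoint_instr() depending on kernel support.
 */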
static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
             (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

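/*
 * Hand the current hardware breakpoint list to KVM, translating each
 * guest-virtual breakpoint address to a physical one.
 */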
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

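/*
 * Translate the extended kvm_s390_irq representation into the legacy
 * kvm_s390_interrupt layout used by the older KVM_S390_INTERRUPT ioctl.
 */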
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

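/*
 * SERVICE CALL: the two register numbers encoded in ipbh0 designate the
 * registers holding the SCCB address and the SCLP command code.
 */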
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        kvm_s390_program_interrupt(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

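/*
 * Compute the effective address of an RXY-format instruction from the
 * intercept data: base register, index register and a signed 20-bit
 * displacement assembled from the DL and DH fields of the ipb.
 */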
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

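/* Likewise for RSY-format instructions: base plus signed 20-bit displacement, no index. */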
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

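/*
 * On a software breakpoint the PSW already points past the breakpoint
 * instruction, so rewind it to report the breakpoint's own address.
 */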
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

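/*
 * Halt the CPU and report a guest crash to the management layer; used when
 * we detect an intercept loop that cannot be handled sanely.
 */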
static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask  = run->psw_mask;
    oldpsw.addr  = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions if the pgm new PSW will
     * cause a new operation exception. The heuristic checks if the pgm new
     * psw is within 6 bytes before the faulting psw address (with same DAT,
     * AS settings), the new psw is not a wait psw, and the fault was not
     * triggered by problem state. In that case, go into crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

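/*
 * Patch the level-3 (VM) entry of the SYSIB 3.2.2 that the kernel returned
 * for STSI: insert QEMU's name, UUID and Extended Name.
 */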
1716 static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
1717 {
1718     SysIB_322 sysib;
1719     int del;
1720 
1721     if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
1722         return;
1723     }
1724     /* Shift the stack of Extended Names to prepare for our own data */
1725     memmove(&sysib.ext_names[1], &sysib.ext_names[0],
1726             sizeof(sysib.ext_names[0]) * (sysib.count - 1));
1727     /* First virt level, that doesn't provide Ext Names delimits stack. It is
1728      * assumed it's not capable of managing Extended Names for lower levels.
1729      */
1730     for (del = 1; del < sysib.count; del++) {
1731         if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
1732             break;
1733         }
1734     }
1735     if (del < sysib.count) {
1736         memset(sysib.ext_names[del], 0,
1737                sizeof(sysib.ext_names[0]) * (sysib.count - del));
1738     }
1739     /* Insert short machine name in EBCDIC, padded with blanks */
1740     if (qemu_name) {
1741         memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
1742         ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
1743                                                     strlen(qemu_name)));
1744     }
1745     sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
1746     memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
1747     /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
1748      * SYSIB, s390 considers it incapable of providing any Extended Name.
1749      * Therefore, if no name was given on the qemu invocation, use the
1750      * same "KVMguest" default that KVM has filled into the short name field.
1751      */
1752     if (qemu_name) {
1753         strncpy((char *)sysib.ext_names[0], qemu_name,
1754                 sizeof(sysib.ext_names[0]));
1755     } else {
1756         strcpy((char *)sysib.ext_names[0], "KVMguest");
1757     }
1758     /* Insert UUID */
1759     memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
1760 
1761     s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
1762 }
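
/*
 * Worked example for the name handling above (illustrative value): when
 * QEMU runs with "-name test", the short name becomes the EBCDIC bytes
 * for "test" padded with EBCDIC blanks (0x40) up to the size of
 * vm[0].name, and ext_names[0] carries the plain UTF-8 string "test"
 * (ext_name_encoding == 2).  Without -name, the short name filled in by
 * KVM is kept and only ext_names[0] is set, to the "KVMguest" default.
 */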
1763 
1764 static int handle_stsi(S390CPU *cpu)
1765 {
1766     CPUState *cs = CPU(cpu);
1767     struct kvm_run *run = cs->kvm_run;
1768 
1769     switch (run->s390_stsi.fc) {
1770     case 3:
1771         if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
1772             return 0;
1773         }
1774         /* Only sysib 3.2.2 needs post-handling for now. */
1775         insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
1776         return 0;
1777     default:
1778         return 0;
1779     }
1780 }
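
/*
 * The kernel has already executed the STSI instruction when this exit is
 * delivered; handle_stsi() only post-processes the data the guest will
 * see, which is why every path returns 0.  Only function code 3 with
 * selectors 2/2 (the VM-level SYSIB) currently needs such post-processing.
 */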
1781 
1782 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
1783 {
1784     CPUState *cs = CPU(cpu);
1785     struct kvm_run *run = cs->kvm_run;
1786 
1787     int ret = 0;
1788     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1789 
1790     switch (arch_info->type) {
1791     case KVM_HW_WP_WRITE:
1792         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1793             cs->watchpoint_hit = &hw_watchpoint;
1794             hw_watchpoint.vaddr = arch_info->addr;
1795             hw_watchpoint.flags = BP_MEM_WRITE;
1796             ret = EXCP_DEBUG;
1797         }
1798         break;
1799     case KVM_HW_BP:
1800         if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1801             ret = EXCP_DEBUG;
1802         }
1803         break;
1804     case KVM_SINGLESTEP:
1805         if (cs->singlestep_enabled) {
1806             ret = EXCP_DEBUG;
1807         }
1808         break;
1809     default:
1810         ret = -ENOSYS;
1811     }
1812 
1813     return ret;
1814 }
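
/*
 * EXCP_DEBUG propagates through the generic KVM loop to the gdb stub;
 * setting cs->watchpoint_hit additionally tells the common code which data
 * address triggered the stop, so the watchpoint can be reported precisely.
 */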
1815 
1816 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1817 {
1818     S390CPU *cpu = S390_CPU(cs);
1819     int ret = 0;
1820 
1821     qemu_mutex_lock_iothread();
1822 
1823     kvm_cpu_synchronize_state(cs);
1824 
1825     switch (run->exit_reason) {
1826     case KVM_EXIT_S390_SIEIC:
1827         ret = handle_intercept(cpu);
1828         break;
1829     case KVM_EXIT_S390_RESET:
1830         s390_ipl_reset_request(cs, S390_RESET_REIPL);
1831         break;
1832     case KVM_EXIT_S390_TSCH:
1833         ret = handle_tsch(cpu);
1834         break;
1835     case KVM_EXIT_S390_STSI:
1836         ret = handle_stsi(cpu);
1837         break;
1838     case KVM_EXIT_DEBUG:
1839         ret = kvm_arch_handle_debug_exit(cpu);
1840         break;
1841     default:
1842         fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
1843         break;
1844     }
1845     qemu_mutex_unlock_iothread();
1846 
1847     if (ret == 0) {
1848         ret = EXCP_INTERRUPT;
1849     }
1850     return ret;
1851 }
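
/*
 * All exit handlers above run under the iothread mutex because they may
 * touch device or machine state (e.g. the channel subsystem or the IPL
 * device).  Turning a result of 0 into EXCP_INTERRUPT signals the generic
 * vcpu loop to process pending work before re-entering the guest.
 */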
1852 
1853 bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
1854 {
1855     return true;
1856 }
1857 
1858 void kvm_s390_enable_css_support(S390CPU *cpu)
1859 {
1860     int r;
1861 
1862     /* Activate host kernel channel subsystem support. */
1863     r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
1864     assert(r == 0);
1865 }
1866 
1867 void kvm_arch_init_irq_routing(KVMState *s)
1868 {
1869     /*
1870      * Note that while irqchip capabilities generally imply that cpustates
1871      * are handled in-kernel, it is not true for s390 (yet); therefore, we
1872      * have to override the common code kvm_halt_in_kernel_allowed setting.
1873      */
1874     if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
1875         kvm_gsi_routing_allowed = true;
1876         kvm_halt_in_kernel_allowed = false;
1877     }
1878 }
1879 
1880 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
1881                                     int vq, bool assign)
1882 {
1883     struct kvm_ioeventfd kick = {
1884         .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
1885         KVM_IOEVENTFD_FLAG_DATAMATCH,
1886         .fd = event_notifier_get_fd(notifier),
1887         .datamatch = vq,
1888         .addr = sch,
1889         .len = 8,
1890     };
1891     if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
1892         return -ENOSYS;
1893     }
1894     if (!assign) {
1895         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1896     }
1897     return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1898 }
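
/*
 * A minimal usage sketch (hypothetical caller; virtio-ccw is the real user
 * of this helper).  Once assigned, the kernel kicks the eventfd directly
 * whenever the guest notifies this subchannel/queue pair, avoiding a full
 * exit to userspace:
 *
 *     EventNotifier n;
 *
 *     event_notifier_init(&n, 0);
 *     kvm_s390_assign_subch_ioeventfd(&n, sch, vq, true);
 *     ...
 *     kvm_s390_assign_subch_ioeventfd(&n, sch, vq, false);
 *     event_notifier_cleanup(&n);
 */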
1899 
1900 int kvm_s390_get_ri(void)
1901 {
1902     return cap_ri;
1903 }
1904 
1905 int kvm_s390_get_gs(void)
1906 {
1907     return cap_gs;
1908 }
1909 
1910 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
1911 {
1912     struct kvm_mp_state mp_state = {};
1913     int ret;
1914 
1915     /* the kvm part might not have been initialized yet */
1916     if (CPU(cpu)->kvm_state == NULL) {
1917         return 0;
1918     }
1919 
1920     switch (cpu_state) {
1921     case S390_CPU_STATE_STOPPED:
1922         mp_state.mp_state = KVM_MP_STATE_STOPPED;
1923         break;
1924     case S390_CPU_STATE_CHECK_STOP:
1925         mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
1926         break;
1927     case S390_CPU_STATE_OPERATING:
1928         mp_state.mp_state = KVM_MP_STATE_OPERATING;
1929         break;
1930     case S390_CPU_STATE_LOAD:
1931         mp_state.mp_state = KVM_MP_STATE_LOAD;
1932         break;
1933     default:
1934         error_report("Requested CPU state is not a valid S390 CPU state: %u",
1935                      cpu_state);
1936         exit(1);
1937     }
1938 
1939     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
1940     if (ret) {
1941         trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
1942                                        strerror(-ret));
1943     }
1944 
1945     return ret;
1946 }
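
/*
 * This helper only mirrors a state change into the kernel's KVM_MP_STATE;
 * callers such as s390_cpu_set_state() are expected to track the CPU state
 * on the QEMU side themselves and call this to keep KVM in sync.
 */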
1947 
1948 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
1949 {
1950     struct kvm_s390_irq_state irq_state = {
1951         .buf = (uint64_t) cpu->irqstate,
1952         .len = VCPU_IRQ_BUF_SIZE,
1953     };
1954     CPUState *cs = CPU(cpu);
1955     int32_t bytes;
1956 
1957     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
1958         return;
1959     }
1960 
1961     bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
1962     if (bytes < 0) {
1963         cpu->irqstate_saved_size = 0;
1964         error_report("Migration of interrupt state failed");
1965         return;
1966     }
1967 
1968     cpu->irqstate_saved_size = bytes;
1969 }
1970 
1971 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
1972 {
1973     CPUState *cs = CPU(cpu);
1974     struct kvm_s390_irq_state irq_state = {
1975         .buf = (uint64_t) cpu->irqstate,
1976         .len = cpu->irqstate_saved_size,
1977     };
1978     int r;
1979 
1980     if (cpu->irqstate_saved_size == 0) {
1981         return 0;
1982     }
1983 
1984     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
1985         return -ENOSYS;
1986     }
1987 
1988     r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
1989     if (r) {
1990         error_report("Setting interrupt state failed %d", r);
1991     }
1992     return r;
1993 }
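
/*
 * The two functions above form a migration pair: pre_save copies up to
 * VCPU_IRQ_BUF_SIZE bytes of pending per-vcpu interrupt state from the
 * kernel into cpu->irqstate, and post_load replays that buffer on the
 * destination.  A saved size of 0 (nothing pending, or no kernel support)
 * makes post_load a no-op.
 */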
1994 
1995 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1996                              uint64_t address, uint32_t data, PCIDevice *dev)
1997 {
1998     S390PCIBusDevice *pbdev;
1999     uint32_t vec = data & ZPCI_MSI_VEC_MASK;
2000 
2001     if (!dev) {
2002         DPRINTF("add_msi_route no pci device\n");
2003         return -ENODEV;
2004     }
2005 
2006     pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
2007     if (!pbdev) {
2008         DPRINTF("add_msi_route no zpci device\n");
2009         return -ENODEV;
2010     }
2011 
2012     route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
2013     route->flags = 0;
2014     route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
2015     route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
2016     route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
2017     route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
2018     route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
2019     return 0;
2020 }
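
/*
 * Example of the translation above (illustrative numbers): an MSI-X
 * message whose data word encodes vector 3 does not become a classic MSI
 * route; instead it becomes an adapter interrupt route whose indicator
 * bit is ind_offset + 3 within the indicator area registered for this
 * zPCI device.
 */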
2021 
2022 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
2023                                 int vector, PCIDevice *dev)
2024 {
2025     return 0;
2026 }
2027 
2028 int kvm_arch_release_virq_post(int virq)
2029 {
2030     return 0;
2031 }
2032 
2033 int kvm_arch_msi_data_to_gsi(uint32_t data)
2034 {
2035     abort();
2036 }
2037 
2038 static int query_cpu_subfunc(S390FeatBitmap features)
2039 {
2040     struct kvm_s390_vm_cpu_subfunc prop;
2041     struct kvm_device_attr attr = {
2042         .group = KVM_S390_VM_CPU_MODEL,
2043         .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
2044         .addr = (uint64_t) &prop,
2045     };
2046     int rc;
2047 
2048     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2049     if (rc) {
2050         return rc;
2051     }
2052 
2053     /*
2054      * Add each block of subfunctions below only if the corresponding
2055      * feature that unlocks its query function is available.
2056      */
2057     s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2058     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2059         s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2060     }
2061     if (test_bit(S390_FEAT_MSA, features)) {
2062         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2063         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2064         s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2065         s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2066         s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2067     }
2068     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2069         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2070     }
2071     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2072         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2073         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2074         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2075         s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2076     }
2077     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2078         s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2079     }
2080     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2081         s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2082     }
2083     return 0;
2084 }
2085 
2086 static int configure_cpu_subfunc(const S390FeatBitmap features)
2087 {
2088     struct kvm_s390_vm_cpu_subfunc prop = {};
2089     struct kvm_device_attr attr = {
2090         .group = KVM_S390_VM_CPU_MODEL,
2091         .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
2092         .addr = (uint64_t) &prop,
2093     };
2094 
2095     if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2096                            KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
2097         /* hardware support might be missing; IBC will handle most of this */
2098         return 0;
2099     }
2100 
2101     s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
2102     if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
2103         s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
2104     }
2105     if (test_bit(S390_FEAT_MSA, features)) {
2106         s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
2107         s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
2108         s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
2109         s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
2110         s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
2111     }
2112     if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
2113         s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
2114     }
2115     if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
2116         s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
2117         s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
2118         s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
2119         s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
2120     }
2121     if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
2122         s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
2123     }
2124     if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
2125         s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
2126     }
2127     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2128 }
2129 
2130 static int kvm_to_feat[][2] = {
2131     { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
2132     { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
2133     { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
2134     { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
2135     { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
2136     { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
2137     { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
2138     { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
2139     { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
2140     { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
2141     { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
2142     { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
2143     { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
2144     { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
2145 };
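
/*
 * This table is used in both directions: query_cpu_feat() translates the
 * KVM_S390_VM_CPU_FEAT_* bits reported by the kernel into S390_FEAT_*
 * bits, and configure_cpu_feat() translates them back.  Supporting a new
 * interpreted SIE feature therefore only requires one new entry here.
 */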
2146 
2147 static int query_cpu_feat(S390FeatBitmap features)
2148 {
2149     struct kvm_s390_vm_cpu_feat prop;
2150     struct kvm_device_attr attr = {
2151         .group = KVM_S390_VM_CPU_MODEL,
2152         .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
2153         .addr = (uint64_t) &prop,
2154     };
2155     int rc;
2156     int i;
2157 
2158     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2159     if (rc) {
2160         return rc;
2161     }
2162 
2163     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2164         if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
2165             set_bit(kvm_to_feat[i][1], features);
2166         }
2167     }
2168     return 0;
2169 }
2170 
2171 static int configure_cpu_feat(const S390FeatBitmap features)
2172 {
2173     struct kvm_s390_vm_cpu_feat prop = {};
2174     struct kvm_device_attr attr = {
2175         .group = KVM_S390_VM_CPU_MODEL,
2176         .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
2177         .addr = (uint64_t) &prop,
2178     };
2179     int i;
2180 
2181     for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2182         if (test_bit(kvm_to_feat[i][1], features)) {
2183             set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
2184         }
2185     }
2186     return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2187 }
2188 
2189 bool kvm_s390_cpu_models_supported(void)
2190 {
2191     if (!cpu_model_allowed()) {
2192         /* compatibility machines interfere with the cpu model */
2193         return false;
2194     }
2195     return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2196                              KVM_S390_VM_CPU_MACHINE) &&
2197            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2198                              KVM_S390_VM_CPU_PROCESSOR) &&
2199            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2200                              KVM_S390_VM_CPU_MACHINE_FEAT) &&
2201            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2202                              KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
2203            kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2204                              KVM_S390_VM_CPU_MACHINE_SUBFUNC);
2205 }
2206 
2207 void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
2208 {
2209     struct kvm_s390_vm_cpu_machine prop = {};
2210     struct kvm_device_attr attr = {
2211         .group = KVM_S390_VM_CPU_MODEL,
2212         .attr = KVM_S390_VM_CPU_MACHINE,
2213         .addr = (uint64_t) &prop,
2214     };
2215     uint16_t unblocked_ibc = 0, cpu_type = 0;
2216     int rc;
2217 
2218     memset(model, 0, sizeof(*model));
2219 
2220     if (!kvm_s390_cpu_models_supported()) {
2221         error_setg(errp, "KVM doesn't support CPU models");
2222         return;
2223     }
2224 
2225     /* query the basic cpu model properties */
2226     rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2227     if (rc) {
2228         error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
2229         return;
2230     }
2231 
2232     cpu_type = cpuid_type(prop.cpuid);
2233     if (has_ibc(prop.ibc)) {
2234         model->lowest_ibc = lowest_ibc(prop.ibc);
2235         unblocked_ibc = unblocked_ibc(prop.ibc);
2236     }
2237     model->cpu_id = cpuid_id(prop.cpuid);
2238     model->cpu_id_format = cpuid_format(prop.cpuid);
2239     model->cpu_ver = 0xff;
2240 
2241     /* get supported cpu features indicated via STFL(E) */
2242     s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
2243                              (uint8_t *) prop.fac_mask);
2244     /* dat-enhancement facility 2 has no bit but was introduced with stfle */
2245     if (test_bit(S390_FEAT_STFLE, model->features)) {
2246         set_bit(S390_FEAT_DAT_ENH_2, model->features);
2247     }
2248     /* get supported cpu features indicated e.g. via SCLP */
2249     rc = query_cpu_feat(model->features);
2250     if (rc) {
2251         error_setg(errp, "KVM: Error querying CPU features: %d", rc);
2252         return;
2253     }
2254     /* get supported cpu subfunctions indicated via query / test bit */
2255     rc = query_cpu_subfunc(model->features);
2256     if (rc) {
2257         error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
2258         return;
2259     }
2260 
2261     /* PTFF subfunctions may be indicated even without kernel support */
2262     if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
2263         clear_bit(S390_FEAT_PTFF_QSIE, model->features);
2264         clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
2265         clear_bit(S390_FEAT_PTFF_STOE, model->features);
2266         clear_bit(S390_FEAT_PTFF_STOUE, model->features);
2267     }
2268 
2269     /* with cpu model support, CMM is only indicated if really available */
2270     if (kvm_s390_cmma_available()) {
2271         set_bit(S390_FEAT_CMM, model->features);
2272     } else {
2273         /* no cmm -> no cmm nt */
2274         clear_bit(S390_FEAT_CMM_NT, model->features);
2275     }
2276 
2277     /* bpb needs kernel support for migration, VSIE and reset */
2278     if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
2279         clear_bit(S390_FEAT_BPB, model->features);
2280     }
2281 
2282     /* We emulate a zPCI bus and AEN; therefore we don't need HW support */
2283     if (pci_available) {
2284         set_bit(S390_FEAT_ZPCI, model->features);
2285     }
2286     set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
2287 
2288     if (s390_known_cpu_type(cpu_type)) {
2289         /* we want the exact model, even if some features are missing */
2290         model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
2291                                        ibc_ec_ga(unblocked_ibc), NULL);
2292     } else {
2293         /* model unknown, e.g. too new - search using features */
2294         model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
2295                                        ibc_ec_ga(unblocked_ibc),
2296                                        model->features);
2297     }
2298     if (!model->def) {
2299         error_setg(errp, "KVM: host CPU model could not be identified");
2300         return;
2301     }
2302     /* for now, we can only provide the AP feature with HW support */
2303     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
2304         KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
2305         set_bit(S390_FEAT_AP, model->features);
2306     }
2307     /* strip off features that are not part of the maximum model */
2308     bitmap_and(model->features, model->features, model->def->full_feat,
2309                S390_FEAT_MAX);
2310 }
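
/*
 * Typical call sequence (sketch; mirrors how the CPU model code consumes
 * this function):
 *
 *     S390CPUModel model;
 *     Error *err = NULL;
 *
 *     kvm_s390_get_host_cpu_model(&model, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */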
2311 
2312 static void kvm_s390_configure_apie(bool interpret)
2313 {
2314     uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
2315                                 KVM_S390_VM_CRYPTO_DISABLE_APIE;
2316 
2317     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
2318         kvm_s390_set_attr(attr);
2319     }
2320 }
2321 
2322 void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
2323 {
2324     struct kvm_s390_vm_cpu_processor prop = {
2325         .fac_list = { 0 },
2326     };
2327     struct kvm_device_attr attr = {
2328         .group = KVM_S390_VM_CPU_MODEL,
2329         .attr = KVM_S390_VM_CPU_PROCESSOR,
2330         .addr = (uint64_t) &prop,
2331     };
2332     int rc;
2333 
2334     if (!model) {
2335         /* compatibility handling if cpu models are disabled */
2336         if (kvm_s390_cmma_available()) {
2337             kvm_s390_enable_cmma();
2338         }
2339         return;
2340     }
2341     if (!kvm_s390_cpu_models_supported()) {
2342         error_setg(errp, "KVM doesn't support CPU models");
2343         return;
2344     }
2345     prop.cpuid = s390_cpuid_from_cpu_model(model);
2346     prop.ibc = s390_ibc_from_cpu_model(model);
2347     /* configure cpu features indicated via STFL(E) */
2348     s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
2349                          (uint8_t *) prop.fac_list);
2350     rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2351     if (rc) {
2352         error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
2353         return;
2354     }
2355     /* configure cpu features indicated e.g. via SCLP */
2356     rc = configure_cpu_feat(model->features);
2357     if (rc) {
2358         error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
2359         return;
2360     }
2361     /* configure cpu subfunctions indicated via query / test bit */
2362     rc = configure_cpu_subfunc(model->features);
2363     if (rc) {
2364         error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
2365         return;
2366     }
2367     /* enable CMM via CMMA */
2368     if (test_bit(S390_FEAT_CMM, model->features)) {
2369         kvm_s390_enable_cmma();
2370     }
2371 
2372     if (test_bit(S390_FEAT_AP, model->features)) {
2373         kvm_s390_configure_apie(true);
2374     }
2375 }
2376 
2377 void kvm_s390_restart_interrupt(S390CPU *cpu)
2378 {
2379     struct kvm_s390_irq irq = {
2380         .type = KVM_S390_RESTART,
2381     };
2382 
2383     kvm_s390_vcpu_interrupt(cpu, &irq);
2384 }
2385 
2386 void kvm_s390_stop_interrupt(S390CPU *cpu)
2387 {
2388     struct kvm_s390_irq irq = {
2389         .type = KVM_S390_SIGP_STOP,
2390     };
2391 
2392     kvm_s390_vcpu_interrupt(cpu, &irq);
2393 }
2394