1 /*
2 * S/390 misc helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "qemu/cutils.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "s390x-internal.h"
26 #include "qemu/host-utils.h"
27 #include "exec/helper-proto.h"
28 #include "qemu/timer.h"
29 #include "exec/cputlb.h"
30 #include "accel/tcg/cpu-ldst.h"
31 #include "exec/target_page.h"
32 #include "qapi/error.h"
33 #include "tcg_s390x.h"
34 #include "s390-tod.h"
35
36 #if !defined(CONFIG_USER_ONLY)
37 #include "system/cpus.h"
38 #include "system/system.h"
39 #include "hw/s390x/ebcdic.h"
40 #include "hw/s390x/s390-hypercall.h"
41 #include "hw/s390x/sclp.h"
42 #include "hw/s390x/s390_flic.h"
43 #include "hw/s390x/ioinst.h"
44 #include "hw/s390x/s390-pci-inst.h"
45 #include "hw/boards.h"
46 #include "hw/s390x/tod.h"
47 #include CONFIG_DEVICES
48 #endif
49
50 /* #define DEBUG_HELPER */
51 #ifdef DEBUG_HELPER
52 #define HELPER_LOG(x...) qemu_log(x)
53 #else
54 #define HELPER_LOG(x...)
55 #endif
56
/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    /* Record the exception and unwind out of the translated code. */
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
66
/*
 * Store CPU Timer (also used for EXTRACT CPU TIME).
 *
 * Returns the current CPU-timer value.
 */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care as it is up to the OS when to process that
     * interrupt and reset to > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    /* Remaining time until env->cputm expiry, converted to TOD format. */
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}
81
/*
 * Store Clock: return the low 64 bits of the TOD clock.
 */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    /* Derive the TOD value directly from host wall-clock time. */
    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    /* TOD epoch is 1900-01-01; shift the Unix epoch accordingly. */
    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}
102
103 #ifndef CONFIG_USER_ONLY
104 /* SCLP service call */
HELPER(servc)105 uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
106 {
107 bql_lock();
108 int r = sclp_service_call(env_archcpu(env), r1, r2);
109 bql_unlock();
110 if (r < 0) {
111 tcg_s390_program_interrupt(env, -r, GETPC());
112 }
113 return r;
114 }
115
/*
 * DIAGNOSE: dispatch the diagnose function selected by 'num'.
 * r1/r3 are the instruction's register fields; their meaning depends on
 * the function code.  Unsupported codes raise a specification exception.
 */
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
#ifdef CONFIG_S390_CCW_VIRTIO
    case 0x500:
        /* QEMU/KVM hypercall */
        bql_lock();
        handle_diag_500(env_archcpu(env), GETPC());
        bql_unlock();
        r = 0;
        break;
#endif /* CONFIG_S390_CCW_VIRTIO */
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        bql_lock();
        handle_diag_308(env, r1, r3, GETPC());
        bql_unlock();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    /* Any non-zero result (including unknown codes) -> specification. */
    if (r) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}
154
/*
 * Set Prefix: change the prefix (lowcore) area of this CPU.
 * Flushes the TLB entries covering both the old and the new prefix
 * pages, since address 0 and the prefix area swap their mappings.
 */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    const uint32_t prefix = a1 & 0x7fffe000;
    const uint32_t old_prefix = env->psa;
    CPUState *cs = env_cpu(env);

    if (prefix == old_prefix) {
        return;
    }
    /*
     * Since prefix got aligned to 8k and memory increments are a multiple of
     * 8k checking the first page is sufficient
     */
    if (!mmu_absolute_addr_valid(prefix, true)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    /* The first two pages always alias either real 0 or the prefix area. */
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
    if (prefix != 0) {
        tlb_flush_page(cs, prefix);
        tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
    }
    if (old_prefix != 0) {
        tlb_flush_page(cs, old_prefix);
        tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
    }
}
186
/*
 * Re-arm the clock-comparator (CKC) timer from env->ckc.
 * Must be called with the BQL held; cancels any pending CKC interrupt
 * before computing the new expiry.
 */
static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(bql_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}
210
/* Set Clock Comparator: store the new value and re-arm the CKC timer. */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    /* update_ckc_timer() requires the BQL. */
    bql_lock();
    update_ckc_timer(env);
    bql_unlock();
}
220
/* Called (per CPU) after the TOD base changed: re-arm the CKC timer. */
void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    CPUS390XState *env = cpu_env(cs);

    update_ckc_timer(env);
}
225
226 /* Set Clock */
HELPER(sck)227 uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
228 {
229 S390TODState *td = s390_get_todstate();
230 S390TODClass *tdc = S390_TOD_GET_CLASS(td);
231 S390TOD tod = {
232 .high = 0,
233 .low = tod_low,
234 };
235
236 bql_lock();
237 tdc->set(td, &tod, &error_abort);
238 bql_unlock();
239 return 0;
240 }
241
242 /* Set Tod Programmable Field */
HELPER(sckpf)243 void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
244 {
245 uint32_t val = r0;
246
247 if (val & 0xffff0000) {
248 tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
249 }
250 env->todpr = val;
251 }
252
/* Store Clock Comparator: return the current CKC value. */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}
258
/*
 * Set CPU Timer: arm the CPU timer to expire after 'time' (TOD units).
 * An all-ones value means "never expires" and leaves the timer alone.
 */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    /* Absolute expiry deadline on the virtual clock. */
    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}
273
/*
 * Store System Information.
 *
 * a0 is the (page-aligned) destination address, r0 carries the function
 * code and selector 1, r1 carries selector 2.  Returns the condition
 * code (0 on success, 3 for unsupported function/selector combinations).
 */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    /* The destination must be page aligned. */
    if (a0 & ~TARGET_PAGE_MASK) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have a sclp linemode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     * and change defaults (linemode console) based on machine type
     * and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                /* 0x40 is the EBCDIC space used as padding. */
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strpadcpy((char *)sysib.sysib_322.ext_names[0],
                          sizeof(sysib.sysib_322.ext_names[0]),
                          qemu_name, '\0');

            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}
421
HELPER(sigp)422 uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
423 uint32_t r3)
424 {
425 int cc;
426
427 /* TODO: needed to inject interrupts - push further down */
428 bql_lock();
429 cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
430 bql_unlock();
431
432 return cc;
433 }
434 #endif
435
436 #ifndef CONFIG_USER_ONLY
HELPER(xsch)437 void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
438 {
439 S390CPU *cpu = env_archcpu(env);
440 bql_lock();
441 ioinst_handle_xsch(cpu, r1, GETPC());
442 bql_unlock();
443 }
444
HELPER(csch)445 void HELPER(csch)(CPUS390XState *env, uint64_t r1)
446 {
447 S390CPU *cpu = env_archcpu(env);
448 bql_lock();
449 ioinst_handle_csch(cpu, r1, GETPC());
450 bql_unlock();
451 }
452
HELPER(hsch)453 void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
454 {
455 S390CPU *cpu = env_archcpu(env);
456 bql_lock();
457 ioinst_handle_hsch(cpu, r1, GETPC());
458 bql_unlock();
459 }
460
HELPER(msch)461 void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
462 {
463 S390CPU *cpu = env_archcpu(env);
464 bql_lock();
465 ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
466 bql_unlock();
467 }
468
HELPER(rchp)469 void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
470 {
471 S390CPU *cpu = env_archcpu(env);
472 bql_lock();
473 ioinst_handle_rchp(cpu, r1, GETPC());
474 bql_unlock();
475 }
476
HELPER(rsch)477 void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
478 {
479 S390CPU *cpu = env_archcpu(env);
480 bql_lock();
481 ioinst_handle_rsch(cpu, r1, GETPC());
482 bql_unlock();
483 }
484
HELPER(sal)485 void HELPER(sal)(CPUS390XState *env, uint64_t r1)
486 {
487 S390CPU *cpu = env_archcpu(env);
488
489 bql_lock();
490 ioinst_handle_sal(cpu, r1, GETPC());
491 bql_unlock();
492 }
493
HELPER(schm)494 void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
495 {
496 S390CPU *cpu = env_archcpu(env);
497
498 bql_lock();
499 ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
500 bql_unlock();
501 }
502
HELPER(ssch)503 void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
504 {
505 S390CPU *cpu = env_archcpu(env);
506 bql_lock();
507 ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
508 bql_unlock();
509 }
510
HELPER(stcrw)511 void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
512 {
513 S390CPU *cpu = env_archcpu(env);
514
515 bql_lock();
516 ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
517 bql_unlock();
518 }
519
HELPER(stsch)520 void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
521 {
522 S390CPU *cpu = env_archcpu(env);
523 bql_lock();
524 ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
525 bql_unlock();
526 }
527
/*
 * TEST PENDING INTERRUPTION.
 *
 * Dequeues a pending I/O interrupt matching cr6 and stores its
 * identification either at 'addr' (if non-zero) or into the lowcore.
 * Returns 1 if an interrupt was stored, 0 otherwise.
 */
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    /* The operand address must be word aligned. */
    if (addr & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    bql_lock();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        bql_unlock();
        return 0;
    }

    if (addr) {
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            bql_unlock();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    bql_unlock();
    return 1;
}
580
HELPER(tsch)581 void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
582 {
583 S390CPU *cpu = env_archcpu(env);
584 bql_lock();
585 ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
586 bql_unlock();
587 }
588
HELPER(chsc)589 void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
590 {
591 S390CPU *cpu = env_archcpu(env);
592 bql_lock();
593 ioinst_handle_chsc(cpu, inst >> 16, GETPC());
594 bql_unlock();
595 }
596 #endif
597
598 #ifndef CONFIG_USER_ONLY
/* Deliver a PER program interrupt and leave the CPU loop; never returns. */
static G_NORETURN void per_raise_exception(CPUS390XState *env)
{
    trigger_pgm_exception(env, PGM_PER);
    cpu_loop_exit(env_cpu(env));
}
604
/* As per_raise_exception(), but log the PER event address first. */
static G_NORETURN void per_raise_exception_log(CPUS390XState *env)
{
    qemu_log_mask(CPU_LOG_INT, "PER interrupt after 0x%" PRIx64 "\n",
                  env->per_address);
    per_raise_exception(env);
}
611
HELPER(per_check_exception)612 void HELPER(per_check_exception)(CPUS390XState *env)
613 {
614 /* psw_addr, per_address and int_pgm_ilen are already set. */
615 if (unlikely(env->per_perc_atmid)) {
616 per_raise_exception_log(env);
617 }
618 }
619
620 /* Check if an address is within the PER starting address and the PER
621 ending address. The address range might loop. */
get_per_in_range(CPUS390XState * env,uint64_t addr)622 static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
623 {
624 if (env->cregs[10] <= env->cregs[11]) {
625 return env->cregs[10] <= addr && addr <= env->cregs[11];
626 } else {
627 return env->cregs[10] <= addr || addr <= env->cregs[11];
628 }
629 }
630
/*
 * PER successful-branch event: record the event and raise the interrupt.
 * 'dest' is the branch target, 'ilen' the length of the branching insn.
 */
void HELPER(per_branch)(CPUS390XState *env, uint64_t dest, uint32_t ilen)
{
    /* With branch-address control, only branches into the range count. */
    if ((env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
        && !get_per_in_range(env, dest)) {
        return;
    }

    env->psw.addr = dest;
    env->int_pgm_ilen = ilen;
    /* The event address is taken from the breaking-event address (gbea). */
    env->per_address = env->gbea;
    env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
    per_raise_exception_log(env);
}
644
/*
 * PER instruction-fetch event: record the event if the current PSW
 * address falls within the PER range; raise immediately when the
 * nullification control is set, otherwise leave it pending.
 */
void HELPER(per_ifetch)(CPUS390XState *env, uint32_t ilen)
{
    if (get_per_in_range(env, env->psw.addr)) {
        env->per_address = env->psw.addr;
        env->int_pgm_ilen = ilen;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_IFETCH_NULLIFICATION) {
            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            qemu_log_mask(CPU_LOG_INT, "PER interrupt before 0x%" PRIx64 "\n",
                          env->per_address);
            per_raise_exception(env);
        }
    }
}
662
/* PER storage-alteration-using-real-address event: record and raise. */
void HELPER(per_store_real)(CPUS390XState *env, uint32_t ilen)
{
    /* PSW is saved just before calling the helper. */
    env->per_address = env->psw.addr;
    env->int_pgm_ilen = ilen;
    env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    per_raise_exception_log(env);
}
671 #endif
672
/* Facility-list bytes for STFL(E); filled lazily by prepare_stfl(). */
static uint8_t stfl_bytes[2048];
/* Length up to and including the last non-zero facility byte. */
static unsigned int used_stfl_bytes;
675
prepare_stfl(void)676 static void prepare_stfl(void)
677 {
678 static bool initialized;
679 int i;
680
681 /* racy, but we don't care, the same values are always written */
682 if (initialized) {
683 return;
684 }
685
686 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
687 for (i = 0; i < sizeof(stfl_bytes); i++) {
688 if (stfl_bytes[i]) {
689 used_stfl_bytes = i + 1;
690 }
691 }
692 initialized = true;
693 }
694
695 #ifndef CONFIG_USER_ONLY
HELPER(stfl)696 void HELPER(stfl)(CPUS390XState *env)
697 {
698 LowCore *lowcore;
699
700 lowcore = cpu_map_lowcore(env);
701 prepare_stfl();
702 memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
703 cpu_unmap_lowcore(lowcore);
704 }
705 #endif
706
/*
 * STORE FACILITY LIST EXTENDED.
 *
 * Stores up to (r0 & 0xff) + 1 doublewords of facility bits at 'addr'
 * and writes back the number of required doublewords - 1 into r0.
 * Returns cc 0 if everything fit, cc 3 otherwise.
 */
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    /* The operand must be doubleword aligned. */
    if (addr & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored.  However, existing hardware appears to
     * not store the words, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    /* Report the required number of doublewords - 1 in the low byte of r0. */
    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}
733
734 #ifndef CONFIG_USER_ONLY
735 /*
736 * Note: we ignore any return code of the functions called for the pci
737 * instructions, as the only time they return !0 is when the stub is
738 * called, and in that case we didn't even offer the zpci facility.
739 * The only exception is SIC, where program checks need to be handled
740 * by the caller.
741 */
HELPER(clp)742 void HELPER(clp)(CPUS390XState *env, uint32_t r2)
743 {
744 S390CPU *cpu = env_archcpu(env);
745
746 bql_lock();
747 clp_service_call(cpu, r2, GETPC());
748 bql_unlock();
749 }
750
HELPER(pcilg)751 void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
752 {
753 S390CPU *cpu = env_archcpu(env);
754
755 bql_lock();
756 pcilg_service_call(cpu, r1, r2, GETPC());
757 bql_unlock();
758 }
759
HELPER(pcistg)760 void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
761 {
762 S390CPU *cpu = env_archcpu(env);
763
764 bql_lock();
765 pcistg_service_call(cpu, r1, r2, GETPC());
766 bql_unlock();
767 }
768
HELPER(stpcifc)769 void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
770 uint32_t ar)
771 {
772 S390CPU *cpu = env_archcpu(env);
773
774 bql_lock();
775 stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
776 bql_unlock();
777 }
778
HELPER(sic)779 void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
780 {
781 S390CPU *cpu = env_archcpu(env);
782 int r;
783
784 bql_lock();
785 r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff);
786 bql_unlock();
787 /* css_do_sic() may actually return a PGM_xxx value to inject */
788 if (r) {
789 tcg_s390_program_interrupt(env, -r, GETPC());
790 }
791 }
792
HELPER(rpcit)793 void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
794 {
795 S390CPU *cpu = env_archcpu(env);
796
797 bql_lock();
798 rpcit_service_call(cpu, r1, r2, GETPC());
799 bql_unlock();
800 }
801
HELPER(pcistb)802 void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
803 uint64_t gaddr, uint32_t ar)
804 {
805 S390CPU *cpu = env_archcpu(env);
806
807 bql_lock();
808 pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
809 bql_unlock();
810 }
811
HELPER(mpcifc)812 void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
813 uint32_t ar)
814 {
815 S390CPU *cpu = env_archcpu(env);
816
817 bql_lock();
818 mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
819 bql_unlock();
820 }
821 #endif
822