/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ## __VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
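
/*
 * Illustrative sketch only (not part of this file's API): a linux-user
 * cpu_loop() would pick the fault address back up roughly as below.
 * The signal-delivery plumbing shown (info, queue_signal) is an
 * assumption for the sake of the example, not code from this tree.
 *
 *     case EXCP_PGM:
 *         if (env->int_pgm_code == PGM_ADDRESSING) {
 *             info.si_signo = TARGET_SIGSEGV;
 *             info._sifields._sigfault._addr = env->__excp_addr;
 *             queue_signal(env, info.si_signo, &info);
 *         }
 *         break;
 */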

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access (valid addresses are [0, ram_size)) */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
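
/*
 * Note on the ilen derivation above: the instruction length is an
 * architected property of the s390x ISA, encoded in the two most
 * significant bits of the first opcode byte (00 -> 2 bytes, 01 or
 * 10 -> 4 bytes, 11 -> 6 bytes).  A minimal sketch of what get_ilen()
 * computes, for illustration only:
 *
 *     static int example_ilen(uint8_t first_opcode_byte)
 *     {
 *         switch (first_opcode_byte >> 6) {
 *         case 0:
 *             return 2;
 *         case 1:
 *         case 2:
 *             return 4;
 *         default:
 *             return 6;
 *         }
 *     }
 */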

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
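
/*
 * For reference, a sketch of the producer side consumed by
 * do_ext_interrupt(): external interrupts are queued with
 * cpu_inject_ext() from interrupt.c.  0x2401 is the architected
 * service-signal external-interrupt code (EXT_SERVICE in cpu.h); the
 * sccb address shown is a placeholder parameter:
 *
 *     cpu_inject_ext(cpu, 0x2401, sccb_address, 0);
 *
 * This fills the next ExtQueue slot, bumps env->ext_index and raises
 * INTERRUPT_EXT so that do_ext_interrupt() can deliver it.
 */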

static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
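
/*
 * For reference, the matching producer for do_io_interrupt() in this
 * QEMU version is cpu_inject_io() from interrupt.c; the parameter
 * names below follow the subchannel fields copied to the lowcore
 * above (sketch only):
 *
 *     cpu_inject_io(cpu, subchannel_id, subchannel_nr,
 *                   io_int_parm, io_int_word);
 *
 * The interruption subclass (ISC) travels inside io_int_word;
 * do_io_interrupt() only delivers a queued interrupt once the
 * corresponding subclass-mask bit in CR6 is set.
 */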

void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now assume it is the current
           default ASC, which is correct except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
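
/*
 * Worked example for the ASC extraction above.  The PSW address-space
 * control occupies mask bits 46-47 (PSW_MASK_ASC == 0x0000C00000000000ULL
 * in cpu.h), so masking and shifting right by 46 yields the two-bit ASC
 * value that is ORed into the ATMID byte of per_perc_atmid:
 *
 *     (PSW_ASC_PRIMARY   & PSW_MASK_ASC) >> 46  ->  0
 *     (PSW_ASC_ACCREG    & PSW_MASK_ASC) >> 46  ->  1
 *     (PSW_ASC_SECONDARY & PSW_MASK_ASC) >> 46  ->  2
 *     (PSW_ASC_HOME      & PSW_MASK_ASC) >> 46  ->  3
 *
 * Note the parentheses around (env->psw.mask & PSW_MASK_ASC): without
 * them, >> binds tighter than & and the ASC bits would be lost.
 */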

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception (for example, COMPARE AND SWAP requires its
   storage operand to be word aligned).  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */