/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#ifndef CONFIG_USER_ONLY
/*
 * Return whether an access of the given type is considered secure.
 * Instruction fetches are governed by the ns_axi_ip ("non-secure AXI
 * instruction port") property; all other accesses (data loads/stores)
 * by ns_axi_dp.  The properties are "non-secure" flags, hence the
 * negation.
 */
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

/*
 * TCG TLB fill hook.  Translate @address for @access_type and install
 * the mapping into the softmmu TLB.  Returns true on success.  On a
 * miss with @probe set, returns false without raising an exception;
 * otherwise records the fault in EAR/ESR and raises EXCP_MMU via
 * cpu_loop_exit_restore() (does not return in that case).
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available: identity-map with full access. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_RWX;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        /* Rebase the page-aligned vaddr onto the looked-up mapping. */
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
    /*
     * Encode the exception cause in ESR: 16/17 = data/insn protection
     * fault, 18/19 = data/insn TLB miss; bit 10 flags a store access.
     */
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    /* A fault while already delivering an MMU fault means we're stuck. */
    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

/*
 * Deliver the pending exception/interrupt in cs->exception_index:
 * save the return address in the architecturally defined register
 * (r17 for exceptions, r14 for interrupts, r16 for breaks), update
 * MSR/ESR/BTR as required, and jump to the corresponding vector.
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            /* Faulted in a delay slot: record the branch target in BTR. */
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress.  Return address is the next insn. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch; back up over the imm if one preceded. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts; r14 holds the interrupt return address. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress; r16 holds the break return address. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /*
     * Save previous mode, disable mmu, disable user-mode:
     * copy VM/UM into the VMS/UMS "saved" bits, then clear all four.
     */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    /* Any pending load-locked reservation is lost on exception entry. */
    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

/*
 * Debugger physical-address lookup (gdbstub / monitor).  Returns the
 * physical page address for @addr, or 0 when no translation exists.
 */
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(cs, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        /*
         * NOTE(review): access_type and mmu_idx are hard-coded to 0 here
         * rather than using the mmu_idx computed above — presumably a
         * don't-care for a debug-only lookup; confirm against
         * mmu_translate()'s contract.
         */
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else
            paddr = 0; /* No translation found; report 0. */
    } else
        paddr = addr & TARGET_PAGE_MASK;

    return paddr;
}

/*
 * Hardware-interrupt acceptance hook.  Take a pending hard interrupt
 * only when IE is set, no exception/break is in progress, and we are
 * not inside a delay-slot or imm-prefixed sequence.
 */
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUMBState *env = cpu_env(cs);

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*
 * Raise an unaligned-data-access hardware exception.  Recovers pc and
 * iflags from the faulting instruction's insn_start, records the fault
 * address in EAR and the cause in ESR, then exits the cpu loop with
 * EXCP_HW_EXCP (does not return).
 */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start.  */
    cpu_restore_state(cs, retaddr);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        /* Fold the per-insn ESS bits (size/store/dest reg) into ESR. */
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}