/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
#include "exec/helper-proto.h"

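/*
 * Deliver an unaligned-access hardware exception: record the faulting
 * address in EAR and the cause in ESR, then enter the exception handler.
 */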
G_NORETURN
static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr,
                                         uintptr_t retaddr)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr);
    iflags = env->iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n",
                  addr, env->pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    env->ear = addr;
    env->esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}

void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    mb_unaligned_access_internal(cs, addr, retaddr);
}

#ifndef CONFIG_USER_ONLY

void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr)
{
    mb_unaligned_access_internal(env_cpu(env), addr, GETPC());
}

static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

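/*
 * Fill the QEMU TLB for @address. Returns true once a mapping is
 * installed; on a miss with @probe clear this raises the guest MMU
 * exception and does not return.
 */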
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_RWX;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
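    /*
     * MicroBlaze ESR exception cause codes: 16/17 are the data/insn
     * storage (protection) faults, 18/19 the data/insn TLB misses.
     * Bit 10 is the S flag, set when the failing access was a store.
     */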
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

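/*
 * Deliver the pending exception, interrupt or break: stash the return
 * address in the dedicated link register, save and clear the VM/UM mode
 * bits, and jump to the corresponding vector.
 */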
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
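        /*
         * R17 is the exception link register; hardware exceptions
         * vector to base_vectors + 0x20.
         */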
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /*
             * Reexecute the branch; with BIMM set it carried an imm
             * prefix, so step back over both insns.
             */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
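        /*
         * R14 is the interrupt link register; interrupts vector to
         * base_vectors + 0x10.
         */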
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
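        /*
         * R16 is the break link register; breaks vector to
         * base_vectors + 0x18.
         */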
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
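    /* MSR_VMS/MSR_UMS sit one bit above MSR_VM/MSR_UM, hence the shift. */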
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(cs, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* No valid mapping found. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}

bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUMBState *env = cpu_env(cs);

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */