1 /*
2 * Microblaze MMU emulation for qemu.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "qemu/log.h"
23 #include "cpu.h"
24 #include "exec/cputlb.h"
25 #include "accel/tcg/cpu-mmu-index.h"
26 #include "exec/page-protection.h"
27 #include "exec/target_page.h"
28
/*
 * Decode the 3-bit TLB page-size field into a size in bytes.
 * Encodings 0..7 map to 1K, 4K, 16K, 64K, 256K, 1M, 4M, 16M:
 * each step quadruples the size, i.e. 1024 << (2 * f).
 */
static unsigned int tlb_decode_size(unsigned int f)
{
    assert(f < 8);
    return 1024u << (f * 2);
}
38
/*
 * Invalidate the QEMU TLB mappings covered by guest TLB entry @idx.
 * A single guest entry may span many target pages, so flush each
 * TARGET_PAGE_SIZE chunk of its virtual range individually.
 */
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    MicroBlazeMMU *mmu = &env->mmu;
    uint32_t tag = mmu->rams[RAM_TAG][idx];
    uint32_t base, va;
    unsigned int size;

    /* Nothing to do for an invalid entry.  */
    if (!(tag & TLB_VALID)) {
        return;
    }

    base = tag & TLB_EPN_MASK;
    size = tlb_decode_size((tag & TLB_PAGESZ_MASK) >> 7);

    for (va = base; va < base + size; va += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, va);
    }
}
59
/*
 * Called on a PID change (before MMU_R_PID is updated): flush every
 * valid, non-global (tid != 0) TLB entry that matches the *current*
 * PID, since those translations are about to become unreachable.
 */
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    MicroBlazeMMU *mmu = &env->mmu;
    uint8_t cur_pid = mmu->regs[MMU_R_PID] & 0xff;
    unsigned int i;

    if (newpid & ~0xff) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
    }

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint32_t tag = mmu->rams[RAM_TAG][i];

        /* tid == 0 marks a global entry that survives PID switches.  */
        if ((tag & TLB_VALID) && mmu->tids[i] && mmu->tids[i] == cur_pid) {
            mmu_flush_idx(env, i);
        }
    }
}
78
/*
 * Translate @vaddr by walking the guest's software-managed TLB.
 *
 * rw - 0 = read, 1 = write, 2 = fetch.
 *
 * On a hit, fills @lu (vaddr/paddr/size/prot/idx, err = ERR_HIT) and
 * returns 1.  On a miss returns 0 with lu->err = ERR_MISS; on a
 * protection violation returns 0 with lu->err = ERR_PROT.
 */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
                           target_ulong vaddr, MMUAccessType rw, int mmu_idx)
{
    MicroBlazeMMU *mmu = &cpu->env.mmu;
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    /* Assume a miss until an entry matches below.  */
    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            /* Guest pages smaller than QEMU's target page are unimplemented. */
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            /* Compare the page-aligned tag against the lookup address.  */
            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            /* tid == 0 is a global entry; otherwise it must match the PID.  */
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            /* ZPR holds 2-bit fields, zone 0 in the top bits.  */
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > cpu->cfg.mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (cpu->cfg.mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

            /*
             * Apply the zone field: 0 = no user access, 1 = use the
             * entry's own EX/WR bits, 2 = kernel gets full access,
             * 3 = everyone gets full access.
             */
            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX)
                    continue;
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default: break;
            }

            /* Entry matched; any bail-out from here is a protection fault.  */
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |=PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=0x" TARGET_FMT_lx
                  " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}
180
/* Writes/reads to the MMU's special regs end up here. */
/*
 * Read MMU special register @rn; @ext selects the upper 32 bits of the
 * 64-bit TLB RAM words (valid only for TLBLO).  Returns 0 on any
 * invalid access, after logging a guest error.
 */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    unsigned int i;
    uint32_t r = 0;

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trig reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        /* TLBX indexes the entry; rn & 1 picks the DATA vs TAG ram.  */
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
        /* Reading TLBHI also loads the entry's tid into PID.  */
        if (rn == MMU_R_TLBHI)
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBX:
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBSX:
        qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}
234
/*
 * Write @v to MMU special register @rn; @ext selects the upper 32 bits
 * of the 64-bit TLB RAM words (valid only for TLBLO).  Invalid accesses
 * are logged and dropped.
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v,
                  rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trig writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            /* Guests rarely invalidate the low entries; flag it when logging
               is enabled, since it may indicate a guest bug.  */
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%x\n",
                              i, env->pc);
            /* Latch the current PID as the entry's tid, then flush the old
               mapping before the RAM word below is overwritten.  */
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        /* rn & 1 picks the DATA vs TAG ram; ext selects the high word.  */
        tmp64 = env->mmu.rams[rn & 1][i];
        env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
        break;
    case MMU_R_ZPR:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(env_cpu(env));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Flush entries tagged with the old PID before switching.  */
        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        /* Search: look up v's EPN and deposit the matching index in TLBX,
           or set the MISS bit when no entry matches.  */
        MicroBlazeMMULookup lu;
        int hit;

        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
                            0, cpu_mmu_index(env_cpu(env), false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}
325
/* Reset the MMU's special registers to their power-on (all zero) state. */
void mmu_init(MicroBlazeMMU *mmu)
{
    size_t r;

    for (r = 0; r < ARRAY_SIZE(mmu->regs); r++) {
        mmu->regs[r] = 0;
    }
}
333