/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "system/memory.h"
#include "qemu/qemu-print.h"
#include "trace.h"

/* Sparc MMU emulation */

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
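
/*
 * Fault codes looked up by [access_index][ACC].  The values appear to be
 * SRMMU fault-status FT codes pre-shifted into bits [4:2]: 8 is a
 * protection error (FT = 2) and 12 a privilege violation (FT = 3),
 * while 0 means the access is allowed.  For example, a user store
 * (access_index = 4) to a read-only page (ACC = 0) yields 8.
 */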
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

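/*
 * Page protections granted for each ACC value, indexed by [is_user][acc].
 * ACC values 6 and 7 are supervisor-only, so the user row maps them to
 * no access at all.
 */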
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);
    MemTxResult result;

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        full->lg_page_size = TARGET_PAGE_BITS;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            full->phys_addr = env->prom_addr | (address & 0x7ffffULL);
            full->prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        full->phys_addr = address;
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

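    /*
     * SRMMU fault-status AT encoding: bit 2 = store, bit 1 = instruction
     * access, bit 0 = supervisor.  rw is 0 for loads, 1 for stores and
     * 2 for instruction fetches, so e.g. a supervisor ifetch yields
     * access_index = 3.
     */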
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    full->phys_addr = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 4 << 2; /* Translation fault, L = 0 */
    }

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
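        /*
         * The table index is VA[31:24] at level 1, VA[23:18] at level 2
         * and VA[17:12] at level 3; entries are 4 bytes, and the next
         * table base is the PTP with the entry-type bits cleared,
         * shifted left by 4.
         */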
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                full->lg_page_size = TARGET_PAGE_BITS;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                full->lg_page_size = 18;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            full->lg_page_size = 24;
            break;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    full->prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        full->prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    target_ulong vaddr;
    int error_code = 0, access_index;

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set.  In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, full.phys_addr, vaddr);
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    }

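    /*
     * FSR layout per the SPARC V8 spec: level in bits [9:8], AT in [7:5],
     * FT in [4:2], FAV at bit 1 and OW at bit 0.  error_code already
     * carries the L and FT fields, so e.g. a user store missing at level
     * 1 ends up as ((1 << 8) | (4 << 2)) | (4 << 5) | 2.
     */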
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;
    MemTxResult result;

    /*
     * TODO: MMU probe operations are supposed to set the fault
     * status registers, but we don't do this.
     */
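
    /*
     * mmulev selects where the walk stops: 3 returns the context-table
     * entry, 2 the level-1 entry (16MB region), 1 the level-2 entry
     * (256KB region) and 0 the level-3 PTE (4KB page), matching the
     * probe calls in dump_mmu() below.
     */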

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 0;
    }

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return 0;
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return 0;
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return 0;
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pa;
    uint32_t pde;

    qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        HWADDR_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function
 * handles reads (and only reads) in stack frames as if the windows had
 * been flushed. We assume that the SPARC ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, size_t len, bool is_write)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

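    /*
     * Layout note: each window occupies 16 slots in env->regbase, with
     * %l0-%l7 at [cwp * 16 + 8] and %i0-%i7 at [cwp * 16 + 16]; %fp is
     * %i6, i.e. regbase[cwp * 16 + 22].  A flushed frame at %fp would
     * hold exactly these 16 registers (64 bytes), which is what the
     * loop below reads from.
     */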
    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack grows downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access the registers byte by byte. Not very efficient,
             * but speed is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE is valid, its context matches (or the entry
   is global), and the virtual address matches the tag under the mask
   derived from the TTE page size. */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
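    /*
     * TTE_PGSIZE is 0..3 for 8K, 64K, 512K and 4M pages, each a factor
     * of 8 larger than the last, so e.g. PGSIZE = 3 gives a mask of
     * -(8192 << 9), covering a 4MB page.
     */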
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
{
    uint64_t sfsr = SFSR_VALID_BIT;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        sfsr |= SFSR_CT_NOTRANS;
        break;
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        break;
    default:
        g_assert_not_reached();
    }

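    /*
     * rw encodes the access type here: 0 for loads, 1 for stores, and 4
     * for no-fault accesses (e.g. as passed by cpu_get_phys_page_nofault
     * below).
     */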
    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    if (env->pstate & PS_PRIV) {
        sfsr |= SFSR_PR_BIT;
    }

    if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
        sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
    }

    /* FIXME: ASI field in SFSR must be set */

    return sfsr;
}

static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t sfsr;
    uint64_t context;
    bool is_user = false;

    sfsr = build_sfsr(env, mmu_idx, rw);

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context,
                                 &full->phys_addr)) {
            int do_fault = 0;

            if (TTE_IS_IE(env->dtlb[i].tte)) {
                full->tlb_fill_flags |= TLB_BSWAP;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                full->prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    full->prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            env->dmmu.sfsr = sfsr;
            env->dmmu.sfar = address; /* Fault address register */
            env->dmmu.tag_access = (address & ~0x1fffULL) | context;
            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, &full->phys_addr)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            full->prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted. */
    full->lg_page_size = TARGET_PAGE_BITS;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        full->phys_addr = ultrasparc_truncate_physical(address);
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, full, address, mmu_idx);
    } else {
        return get_physical_address_data(env, full, address, rw, mmu_idx);
    }
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    int error_code = 0, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    if (likely(error_code == 0)) {
        trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);
        tlb_set_page_full(cs, mmu_idx, address, &full);
        return true;
    }
    if (probe) {
        return false;
    }
    cpu_loop_exit_restore(cs, retaddr);
}

void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    CPUTLBEntryFull full = {};
    int access_index, ret;

    ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx);
    if (ret == 0) {
        *phys = full.phys_addr;
    }
    return ret;
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPUSPARCState *env = cpu_env(cs);
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(cs, false);

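    /* Try an instruction fetch first; if that faults, retry as a data load. */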
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}

G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx,
                                              uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);

#ifdef TARGET_SPARC64
    env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
    env->dmmu.sfar = addr;
#else
    env->mmuregs[4] = addr;
#endif

    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}