1 /*
2 * Helpers for loads and stores
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/range.h"
23 #include "cpu.h"
24 #include "tcg/tcg.h"
25 #include "exec/helper-proto.h"
26 #include "exec/cputlb.h"
27 #include "exec/page-protection.h"
28 #include "exec/target_page.h"
29 #include "accel/tcg/cpu-ldst.h"
30 #include "system/memory.h"
31 #ifdef CONFIG_USER_ONLY
32 #include "user/page-protection.h"
33 #endif
34 #include "asi.h"
35
36 //#define DEBUG_MMU
37 //#define DEBUG_MXCC
38 //#define DEBUG_UNASSIGNED
39 //#define DEBUG_ASI
40 //#define DEBUG_CACHE_CONTROL
41
42 #ifdef DEBUG_MMU
43 #define DPRINTF_MMU(fmt, ...) \
44 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
45 #else
46 #define DPRINTF_MMU(fmt, ...) do {} while (0)
47 #endif
48
49 #ifdef DEBUG_MXCC
50 #define DPRINTF_MXCC(fmt, ...) \
51 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
52 #else
53 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
54 #endif
55
56 #ifdef DEBUG_ASI
57 #define DPRINTF_ASI(fmt, ...) \
58 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
59 #endif
60
61 #ifdef DEBUG_CACHE_CONTROL
62 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
63 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
64 #else
65 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
66 #endif
67
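/*
 * AM_CHECK reflects PSTATE.AM ("address mask"): when it is set (always, for
 * the 32-bit ABI), address_mask() further below truncates effective
 * addresses to their low 32 bits.
 */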
68 #ifdef TARGET_SPARC64
69 #ifndef TARGET_ABI32
70 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
71 #else
72 #define AM_CHECK(env1) (1)
73 #endif
74 #endif
75
76 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
77 /* Calculates TSB pointer value for fault page size
78 * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
79 * UA2005 holds the page size configuration in mmu_ctx registers */
static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
                                       const SparcV9MMU *mmu, const int idx)
82 {
83 uint64_t tsb_register;
84 int page_size;
85 if (cpu_has_hypervisor(env)) {
86 int tsb_index = 0;
87 int ctx = mmu->tag_access & 0x1fffULL;
88 uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
89 tsb_index = idx;
90 tsb_index |= ctx ? 2 : 0;
91 page_size = idx ? ctx_register >> 8 : ctx_register;
92 page_size &= 7;
93 tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
94 } else {
95 page_size = idx;
96 tsb_register = mmu->tsb;
97 }
98 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
99 int tsb_size = tsb_register & 0xf;
100
101 uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;
102
103 /* move va bits to correct position,
104 * the context bits will be masked out later */
105 uint64_t va = mmu->tag_access >> (3 * page_size + 9);
106
107 /* calculate tsb_base mask and adjust va if split is in use */
108 if (tsb_split) {
109 if (idx == 0) {
110 va &= ~(1ULL << (13 + tsb_size));
111 } else {
112 va |= (1ULL << (13 + tsb_size));
113 }
114 tsb_base_mask <<= 1;
115 }
116
117 return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
118 }
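/*
 * Worked example for the function above (illustrative only): with a
 * non-hypervisor CPU, idx == 0 (8k pointer), tsb_size == 0 and no split,
 * tsb_base_mask is ~0x1fff and va is tag_access >> 9, so the result is the
 * TSB base with VA bits [21:13] selecting one of 512 16-byte TTE entries.
 */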
119
120 /* Calculates tag target register value by reordering bits
121 in tag access register */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
123 {
124 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
125 }
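/*
 * Illustration: the tag access register holds VA[63:13] in bits 63:13 and
 * the context in bits 12:0; the shift/or above yields the tag target
 * layout with the context in bits 60:48 and VA[63:22] in bits 41:0.
 */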
126
static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUSPARCState *env)
130 {
131 target_ulong mask, size, va, offset;
132
133 /* flush page range if translation is valid */
134 if (TTE_IS_VALID(tlb->tte)) {
135 CPUState *cs = env_cpu(env);
136
137 size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
138 mask = 1ULL + ~size;
139
140 va = tlb->tag & mask;
141
142 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
143 tlb_flush_page(cs, va + offset);
144 }
145 }
146
147 tlb->tag = tlb_tag;
148 tlb->tte = tlb_tte;
149 }
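/*
 * Note: TTE_PGSIZE() is the TTE page-size field (0..3), so the range
 * flushed above is 8KB, 64KB, 512KB or 4MB (8192 << 3 * pgsize), and
 * "mask" is simply -size, i.e. the corresponding alignment mask.
 */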
150
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char *strmmu, CPUSPARCState *env1)
153 {
154 unsigned int i;
155 target_ulong mask;
156 uint64_t context;
157
158 int is_demap_context = (demap_addr >> 6) & 1;
159
160 /* demap context */
161 switch ((demap_addr >> 4) & 3) {
162 case 0: /* primary */
163 context = env1->dmmu.mmu_primary_context;
164 break;
165 case 1: /* secondary */
166 context = env1->dmmu.mmu_secondary_context;
167 break;
168 case 2: /* nucleus */
169 context = 0;
170 break;
171 case 3: /* reserved */
172 default:
173 return;
174 }
175
176 for (i = 0; i < 64; i++) {
177 if (TTE_IS_VALID(tlb[i].tte)) {
178
179 if (is_demap_context) {
180 /* will remove non-global entries matching context value */
181 if (TTE_IS_GLOBAL(tlb[i].tte) ||
182 !tlb_compare_context(&tlb[i], context)) {
183 continue;
184 }
185 } else {
186 /* demap page
187 will remove any entry matching VA */
188 mask = 0xffffffffffffe000ULL;
189 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
190
191 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
192 continue;
193 }
194
195 /* entry should be global or matching context value */
196 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
197 !tlb_compare_context(&tlb[i], context)) {
198 continue;
199 }
200 }
201
202 replace_tlb_entry(&tlb[i], 0, 0, env1);
203 #ifdef DEBUG_MMU
204 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
205 dump_mmu(env1);
206 #endif
207 }
208 }
209 }
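/*
 * Demap address decoding used above: bit 6 selects "demap context" (1)
 * versus "demap page" (0), and bits 5:4 select the context register
 * (0 primary, 1 secondary, 2 nucleus, 3 reserved).  For a demap-page
 * request an entry is dropped only if its tag matches the VA under the
 * entry's own page-size mask and it is either global or in the selected
 * context.
 */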
210
static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
                                   uint64_t sun4v_tte)
213 {
214 uint64_t sun4u_tte;
215 if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
216 /* is already in the sun4u format */
217 return sun4v_tte;
218 }
219 sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
220 sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
221 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
222 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
223 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
224 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
225 TTE_SIDEEFFECT_BIT);
226 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
227 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
228 return sun4u_tte;
229 }
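/*
 * Sketch of the conversion above: a UA2005 (sun4v) TTE uses a different
 * layout than sun4u, so the physical address and valid bit are kept, the
 * two page-size bits handled by QEMU are moved up to bits 62:61, and the
 * NFO/used/writable/side-effect/privileged/locked attributes are relocated
 * one by one via CONVERT_BIT().
 */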
230
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char *strmmu, CPUSPARCState *env1,
                                 uint64_t addr)
235 {
236 unsigned int i, replace_used;
237
238 tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
239 if (cpu_has_hypervisor(env1)) {
240 uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
241 uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
242 uint32_t new_ctx = tlb_tag & 0x1fffU;
243 for (i = 0; i < 64; i++) {
244 uint32_t ctx = tlb[i].tag & 0x1fffU;
245 /* check if new mapping overlaps an existing one */
246 if (new_ctx == ctx) {
247 uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
248 uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
249 if (ranges_overlap(new_vaddr, new_size, vaddr, size)) {
250 DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
251 new_vaddr);
252 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
253 return;
254 }
255 }
256
257 }
258 }
259 /* Try replacing invalid entry */
260 for (i = 0; i < 64; i++) {
261 if (!TTE_IS_VALID(tlb[i].tte)) {
262 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
263 #ifdef DEBUG_MMU
264 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
265 dump_mmu(env1);
266 #endif
267 return;
268 }
269 }
270
271 /* All entries are valid, try replacing unlocked entry */
272
273 for (replace_used = 0; replace_used < 2; ++replace_used) {
274
275 /* Used entries are not replaced on first pass */
276
277 for (i = 0; i < 64; i++) {
278 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
279
280 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
281 #ifdef DEBUG_MMU
282 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
283 strmmu, (replace_used ? "used" : "unused"), i);
284 dump_mmu(env1);
285 #endif
286 return;
287 }
288 }
289
290 /* Now reset used bit and search for unused entries again */
291
292 for (i = 0; i < 64; i++) {
293 TTE_SET_UNUSED(tlb[i].tte);
294 }
295 }
296
297 #ifdef DEBUG_MMU
298 DPRINTF_MMU("%s lru replacement: no free entries available, "
299 "replacing the last one\n", strmmu);
300 #endif
301 /* corner case: the last entry is replaced anyway */
302 replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
303 }
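/*
 * Replacement policy summary for the function above: (1) with a hypervisor
 * (sun4v), an existing mapping overlapping the new one in the same context
 * is reused in place ("auto demap"); (2) otherwise the first invalid entry
 * is taken; (3) then the first unlocked entry whose used bit is clear,
 * clearing all used bits and retrying once if needed; (4) as a last resort
 * entry 63 is overwritten.
 */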
304
305 #endif
306
307 #ifdef TARGET_SPARC64
/* Returns true if an access using this ASI has its address translated by
   the MMU; otherwise the access goes straight to the raw physical address. */
/* TODO: check sparc32 bits */
static inline int is_translating_asi(int asi)
312 {
313 /* Ultrasparc IIi translating asi
314 - note this list is defined by cpu implementation
315 */
316 switch (asi) {
317 case 0x04 ... 0x11:
318 case 0x16 ... 0x19:
319 case 0x1E ... 0x1F:
320 case 0x24 ... 0x2C:
321 case 0x70 ... 0x73:
322 case 0x78 ... 0x79:
323 case 0x80 ... 0xFF:
324 return 1;
325
326 default:
327 return 0;
328 }
329 }
330
static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
332 {
333 if (AM_CHECK(env1)) {
334 addr &= 0xffffffffULL;
335 }
336 return addr;
337 }
338
static inline target_ulong asi_address_mask(CPUSPARCState *env,
                                            int asi, target_ulong addr)
341 {
342 if (is_translating_asi(asi)) {
343 addr = address_mask(env, addr);
344 }
345 return addr;
346 }
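/*
 * Example: with PSTATE.AM set, an access through a translating ASI such as
 * 0x80 (ASI_P) has its address truncated to 32 bits here, while accesses
 * through bypass ASIs (e.g. 0x14/0x15) keep the full 64-bit address.
 */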
347
348 #ifndef CONFIG_USER_ONLY
static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
350 {
351 /* ASIs >= 0x80 are user mode.
352 * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
353 * ASIs <= 0x2f are super mode.
354 */
355 if (asi < 0x80
356 && !cpu_hypervisor_mode(env)
357 && (!cpu_supervisor_mode(env)
358 || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
359 cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
360 }
361 }
362 #endif /* !CONFIG_USER_ONLY */
363 #endif
364
365 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
static void do_check_align(CPUSPARCState *env, target_ulong addr,
                           uint32_t align, uintptr_t ra)
368 {
369 if (addr & align) {
370 cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
371 }
372 }
373 #endif
374
375 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
376 defined(DEBUG_MXCC)
static void dump_mxcc(CPUSPARCState *env)
378 {
379 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
380 "\n",
381 env->mxccdata[0], env->mxccdata[1],
382 env->mxccdata[2], env->mxccdata[3]);
383 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
384 "\n"
385 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
386 "\n",
387 env->mxccregs[0], env->mxccregs[1],
388 env->mxccregs[2], env->mxccregs[3],
389 env->mxccregs[4], env->mxccregs[5],
390 env->mxccregs[6], env->mxccregs[7]);
391 }
392 #endif
393
394 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
395 && defined(DEBUG_ASI)
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
398 {
399 switch (size) {
400 case 1:
401 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
402 addr, asi, r1 & 0xff);
403 break;
404 case 2:
405 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
406 addr, asi, r1 & 0xffff);
407 break;
408 case 4:
409 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
410 addr, asi, r1 & 0xffffffff);
411 break;
412 case 8:
413 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
414 addr, asi, r1);
415 break;
416 }
417 }
418 #endif
419
420 #ifndef CONFIG_USER_ONLY
421 #ifndef TARGET_SPARC64
static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
                                  bool is_write, bool is_exec, int is_asi,
                                  unsigned size, uintptr_t retaddr)
425 {
426 CPUSPARCState *env = cpu_env(cs);
427 int fault_type;
428
429 #ifdef DEBUG_UNASSIGNED
430 if (is_asi) {
431 printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
432 " asi 0x%02x from " TARGET_FMT_lx "\n",
433 is_exec ? "exec" : is_write ? "write" : "read", size,
434 size == 1 ? "" : "s", addr, is_asi, env->pc);
435 } else {
436 printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
437 " from " TARGET_FMT_lx "\n",
438 is_exec ? "exec" : is_write ? "write" : "read", size,
439 size == 1 ? "" : "s", addr, env->pc);
440 }
441 #endif
442 /* Don't overwrite translation and access faults */
443 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
444 if ((fault_type > 4) || (fault_type == 0)) {
445 env->mmuregs[3] = 0; /* Fault status register */
446 if (is_asi) {
447 env->mmuregs[3] |= 1 << 16;
448 }
449 if (env->psrs) {
450 env->mmuregs[3] |= 1 << 5;
451 }
452 if (is_exec) {
453 env->mmuregs[3] |= 1 << 6;
454 }
455 if (is_write) {
456 env->mmuregs[3] |= 1 << 7;
457 }
458 env->mmuregs[3] |= (5 << 2) | 2;
459 /* SuperSPARC will never place instruction fault addresses in the FAR */
460 if (!is_exec) {
461 env->mmuregs[4] = addr; /* Fault address register */
462 }
463 }
464 /* overflow (same type fault was not read before another fault) */
465 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
466 env->mmuregs[3] |= 1;
467 }
468
469 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
470 int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
471 cpu_raise_exception_ra(env, tt, retaddr);
472 }
473
    /*
     * Flush the "neverland" mappings created during no-fault mode,
     * so that subsequent MMU faults report proper fault types.
     */
478 if (env->mmuregs[0] & MMU_NF) {
479 tlb_flush(cs);
480 }
481 }
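/*
 * The SFSR value assembled above follows the SPARCv8 reference MMU layout:
 * bit 0 is the overwrite flag, bit 1 marks the fault address as valid,
 * bits 4:2 hold the fault type (forced to 5 here), bits 7:5 encode the
 * access type (supervisor / instruction / store), and 1 << 16 is how this
 * file flags a fault caused by an ASI access.
 */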
482 #else
static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
                                  bool is_write, bool is_exec, int is_asi,
                                  unsigned size, uintptr_t retaddr)
486 {
487 CPUSPARCState *env = cpu_env(cs);
488
489 #ifdef DEBUG_UNASSIGNED
490 printf("Unassigned mem access to " HWADDR_FMT_plx " from " TARGET_FMT_lx
491 "\n", addr, env->pc);
492 #endif
493
494 if (is_exec) { /* XXX has_hypervisor */
495 if (env->lsu & (IMMU_E)) {
496 cpu_raise_exception_ra(env, TT_CODE_ACCESS, retaddr);
497 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
498 cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, retaddr);
499 }
500 } else {
501 if (env->lsu & (DMMU_E)) {
502 cpu_raise_exception_ra(env, TT_DATA_ACCESS, retaddr);
503 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
504 cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, retaddr);
505 }
506 }
507 }
508 #endif
509 #endif
510
511 #ifndef TARGET_SPARC64
512 #ifndef CONFIG_USER_ONLY
513
514
515 /* Leon3 cache control */
516
static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
                                   uint64_t val, int size)
519 {
520 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
521 addr, val, size);
522
523 if (size != 4) {
524 DPRINTF_CACHE_CONTROL("32bits only\n");
525 return;
526 }
527
528 switch (addr) {
529 case 0x00: /* Cache control */
530
531 /* These values must always be read as zeros */
532 val &= ~CACHE_CTRL_FD;
533 val &= ~CACHE_CTRL_FI;
534 val &= ~CACHE_CTRL_IB;
535 val &= ~CACHE_CTRL_IP;
536 val &= ~CACHE_CTRL_DP;
537
538 env->cache_control = val;
539 break;
540 case 0x04: /* Instruction cache configuration */
541 case 0x08: /* Data cache configuration */
542 /* Read Only */
543 break;
544 default:
545 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
546 break;
547 };
548 }
549
static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
                                       int size)
552 {
553 uint64_t ret = 0;
554
555 if (size != 4) {
556 DPRINTF_CACHE_CONTROL("32bits only\n");
557 return 0;
558 }
559
560 switch (addr) {
561 case 0x00: /* Cache control */
562 ret = env->cache_control;
563 break;
564
    /* Configuration registers are read-only and always return these
       predefined values */
567
568 case 0x04: /* Instruction cache configuration */
569 ret = 0x10220000;
570 break;
571 case 0x08: /* Data cache configuration */
572 ret = 0x18220000;
573 break;
574 default:
575 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
576 break;
577 };
578 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
579 addr, ret, size);
580 return ret;
581 }
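/*
 * Guest-visible sketch (assuming the LEON3 cache control registers are
 * reached through ASI 0x02, i.e. ASI_LEON_CACHEREGS): "lda [%g0] 0x02, %o0"
 * returns the current cache control word, while offsets 0x04 and 0x08
 * always read back the fixed instruction/data cache configuration words
 * above.
 */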
582
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
585 {
586 int size = 1 << (memop & MO_SIZE);
587 int sign = memop & MO_SIGN;
588 CPUState *cs = env_cpu(env);
589 uint64_t ret = 0;
590 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
591 uint32_t last_addr = addr;
592 #endif
593
594 do_check_align(env, addr, size - 1, GETPC());
595 switch (asi) {
596 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
597 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
598 switch (addr) {
599 case 0x00: /* Leon3 Cache Control */
600 case 0x08: /* Leon3 Instruction Cache config */
        case 0x0C: /* Leon3 Data Cache config */
602 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
603 ret = leon3_cache_control_ld(env, addr, size);
604 } else {
605 qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
606 " address, size: %d\n", addr, size);
607 }
608 break;
609 case 0x01c00a00: /* MXCC control register */
610 if (size == 8) {
611 ret = env->mxccregs[3];
612 } else {
613 qemu_log_mask(LOG_UNIMP,
614 "%08x: unimplemented access size: %d\n", addr,
615 size);
616 }
617 break;
618 case 0x01c00a04: /* MXCC control register */
619 if (size == 4) {
620 ret = env->mxccregs[3];
621 } else {
622 qemu_log_mask(LOG_UNIMP,
623 "%08x: unimplemented access size: %d\n", addr,
624 size);
625 }
626 break;
627 case 0x01c00c00: /* Module reset register */
628 if (size == 8) {
629 ret = env->mxccregs[5];
630 /* should we do something here? */
631 } else {
632 qemu_log_mask(LOG_UNIMP,
633 "%08x: unimplemented access size: %d\n", addr,
634 size);
635 }
636 break;
637 case 0x01c00f00: /* MBus port address register */
638 if (size == 8) {
639 ret = env->mxccregs[7];
640 } else {
641 qemu_log_mask(LOG_UNIMP,
642 "%08x: unimplemented access size: %d\n", addr,
643 size);
644 }
645 break;
646 default:
647 qemu_log_mask(LOG_UNIMP,
648 "%08x: unimplemented address, size: %d\n", addr,
649 size);
650 break;
651 }
652 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
653 "addr = %08x -> ret = %" PRIx64 ","
654 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
655 #ifdef DEBUG_MXCC
656 dump_mxcc(env);
657 #endif
658 break;
659 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
660 case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
661 {
662 int mmulev;
663
664 mmulev = (addr >> 8) & 15;
665 if (mmulev > 4) {
666 ret = 0;
667 } else {
668 ret = mmu_probe(env, addr, mmulev);
669 }
670 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
671 addr, mmulev, ret);
672 }
673 break;
674 case ASI_M_MMUREGS: /* SuperSparc MMU regs */
675 case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
676 {
677 int reg = (addr >> 8) & 0x1f;
678
679 ret = env->mmuregs[reg];
680 if (reg == 3) { /* Fault status cleared on read */
681 env->mmuregs[3] = 0;
682 } else if (reg == 0x13) { /* Fault status read */
683 ret = env->mmuregs[3];
684 } else if (reg == 0x14) { /* Fault address read */
685 ret = env->mmuregs[4];
686 }
687 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
688 }
689 break;
690 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
691 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
692 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
693 break;
694 case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
695 case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
696 case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
697 case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
698 break;
699 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
700 {
701 MemTxResult result;
702 hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
703
704 switch (size) {
705 case 1:
706 ret = address_space_ldub(cs->as, access_addr,
707 MEMTXATTRS_UNSPECIFIED, &result);
708 break;
709 case 2:
710 ret = address_space_lduw(cs->as, access_addr,
711 MEMTXATTRS_UNSPECIFIED, &result);
712 break;
713 default:
714 case 4:
715 ret = address_space_ldl(cs->as, access_addr,
716 MEMTXATTRS_UNSPECIFIED, &result);
717 break;
718 case 8:
719 ret = address_space_ldq(cs->as, access_addr,
720 MEMTXATTRS_UNSPECIFIED, &result);
721 break;
722 }
723
724 if (result != MEMTX_OK) {
725 sparc_raise_mmu_fault(cs, access_addr, false, false, false,
726 size, GETPC());
727 }
728 break;
729 }
730 case 0x30: /* Turbosparc secondary cache diagnostic */
731 case 0x31: /* Turbosparc RAM snoop */
732 case 0x32: /* Turbosparc page table descriptor diagnostic */
733 case 0x39: /* data cache diagnostic register */
734 ret = 0;
735 break;
736 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
737 {
738 int reg = (addr >> 8) & 3;
739
740 switch (reg) {
741 case 0: /* Breakpoint Value (Addr) */
742 ret = env->mmubpregs[reg];
743 break;
744 case 1: /* Breakpoint Mask */
745 ret = env->mmubpregs[reg];
746 break;
747 case 2: /* Breakpoint Control */
748 ret = env->mmubpregs[reg];
749 break;
750 case 3: /* Breakpoint Status */
751 ret = env->mmubpregs[reg];
752 env->mmubpregs[reg] = 0ULL;
753 break;
754 }
755 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
756 ret);
757 }
758 break;
759 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
760 ret = env->mmubpctrv;
761 break;
762 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
763 ret = env->mmubpctrc;
764 break;
765 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
766 ret = env->mmubpctrs;
767 break;
768 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
769 ret = env->mmubpaction;
770 break;
771 default:
772 sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC());
773 ret = 0;
774 break;
775
776 case ASI_USERDATA: /* User data access */
777 case ASI_KERNELDATA: /* Supervisor data access */
778 case ASI_USERTXT: /* User code access */
779 case ASI_KERNELTXT: /* Supervisor code access */
780 case ASI_P: /* Implicit primary context data access (v9 only?) */
781 case ASI_M_BYPASS: /* MMU passthrough */
782 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
783 /* These are always handled inline. */
784 g_assert_not_reached();
785 }
786 if (sign) {
787 switch (size) {
788 case 1:
789 ret = (int8_t) ret;
790 break;
791 case 2:
792 ret = (int16_t) ret;
793 break;
794 case 4:
795 ret = (int32_t) ret;
796 break;
797 default:
798 break;
799 }
800 }
801 #ifdef DEBUG_ASI
802 dump_asi("read ", last_addr, asi, size, ret);
803 #endif
804 return ret;
805 }
806
void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
                   int asi, uint32_t memop)
809 {
810 int size = 1 << (memop & MO_SIZE);
811 CPUState *cs = env_cpu(env);
812
813 do_check_align(env, addr, size - 1, GETPC());
814 switch (asi) {
815 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
816 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
817 switch (addr) {
818 case 0x00: /* Leon3 Cache Control */
819 case 0x08: /* Leon3 Instruction Cache config */
        case 0x0C: /* Leon3 Data Cache config */
821 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
822 leon3_cache_control_st(env, addr, val, size);
823 } else {
824 qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
825 " address, size: %d\n", addr, size);
826 }
827 break;
828
829 case 0x01c00000: /* MXCC stream data register 0 */
830 if (size == 8) {
831 env->mxccdata[0] = val;
832 } else {
833 qemu_log_mask(LOG_UNIMP,
834 "%08x: unimplemented access size: %d\n", addr,
835 size);
836 }
837 break;
838 case 0x01c00008: /* MXCC stream data register 1 */
839 if (size == 8) {
840 env->mxccdata[1] = val;
841 } else {
842 qemu_log_mask(LOG_UNIMP,
843 "%08x: unimplemented access size: %d\n", addr,
844 size);
845 }
846 break;
847 case 0x01c00010: /* MXCC stream data register 2 */
848 if (size == 8) {
849 env->mxccdata[2] = val;
850 } else {
851 qemu_log_mask(LOG_UNIMP,
852 "%08x: unimplemented access size: %d\n", addr,
853 size);
854 }
855 break;
856 case 0x01c00018: /* MXCC stream data register 3 */
857 if (size == 8) {
858 env->mxccdata[3] = val;
859 } else {
860 qemu_log_mask(LOG_UNIMP,
861 "%08x: unimplemented access size: %d\n", addr,
862 size);
863 }
864 break;
865 case 0x01c00100: /* MXCC stream source */
866 {
867 int i;
868
869 if (size == 8) {
870 env->mxccregs[0] = val;
871 } else {
872 qemu_log_mask(LOG_UNIMP,
873 "%08x: unimplemented access size: %d\n", addr,
874 size);
875 }
876
877 for (i = 0; i < 4; i++) {
878 MemTxResult result;
879 hwaddr access_addr = (env->mxccregs[0] & 0xffffffffULL) + 8 * i;
880
881 env->mxccdata[i] = address_space_ldq(cs->as,
882 access_addr,
883 MEMTXATTRS_UNSPECIFIED,
884 &result);
885 if (result != MEMTX_OK) {
886 /* TODO: investigate whether this is the right behaviour */
887 sparc_raise_mmu_fault(cs, access_addr, false, false,
888 false, size, GETPC());
889 }
890 }
891 break;
892 }
893 case 0x01c00200: /* MXCC stream destination */
894 {
895 int i;
896
897 if (size == 8) {
898 env->mxccregs[1] = val;
899 } else {
900 qemu_log_mask(LOG_UNIMP,
901 "%08x: unimplemented access size: %d\n", addr,
902 size);
903 }
904
905 for (i = 0; i < 4; i++) {
906 MemTxResult result;
907 hwaddr access_addr = (env->mxccregs[1] & 0xffffffffULL) + 8 * i;
908
909 address_space_stq(cs->as, access_addr, env->mxccdata[i],
910 MEMTXATTRS_UNSPECIFIED, &result);
911
912 if (result != MEMTX_OK) {
913 /* TODO: investigate whether this is the right behaviour */
914 sparc_raise_mmu_fault(cs, access_addr, true, false,
915 false, size, GETPC());
916 }
917 }
918 break;
919 }
920 case 0x01c00a00: /* MXCC control register */
921 if (size == 8) {
922 env->mxccregs[3] = val;
923 } else {
924 qemu_log_mask(LOG_UNIMP,
925 "%08x: unimplemented access size: %d\n", addr,
926 size);
927 }
928 break;
929 case 0x01c00a04: /* MXCC control register */
930 if (size == 4) {
931 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
932 | val;
933 } else {
934 qemu_log_mask(LOG_UNIMP,
935 "%08x: unimplemented access size: %d\n", addr,
936 size);
937 }
938 break;
939 case 0x01c00e00: /* MXCC error register */
940 /* writing a 1 bit clears the error */
941 if (size == 8) {
942 env->mxccregs[6] &= ~val;
943 } else {
944 qemu_log_mask(LOG_UNIMP,
945 "%08x: unimplemented access size: %d\n", addr,
946 size);
947 }
948 break;
949 case 0x01c00f00: /* MBus port address register */
950 if (size == 8) {
951 env->mxccregs[7] = val;
952 } else {
953 qemu_log_mask(LOG_UNIMP,
954 "%08x: unimplemented access size: %d\n", addr,
955 size);
956 }
957 break;
958 default:
959 qemu_log_mask(LOG_UNIMP,
960 "%08x: unimplemented address, size: %d\n", addr,
961 size);
962 break;
963 }
964 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
965 asi, size, addr, val);
966 #ifdef DEBUG_MXCC
967 dump_mxcc(env);
968 #endif
969 break;
970 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
971 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
972 {
973 int mmulev;
974
975 mmulev = (addr >> 8) & 15;
976 DPRINTF_MMU("mmu flush level %d\n", mmulev);
977 switch (mmulev) {
978 case 0: /* flush page */
979 tlb_flush_page(cs, addr & 0xfffff000);
980 break;
981 case 1: /* flush segment (256k) */
982 case 2: /* flush region (16M) */
983 case 3: /* flush context (4G) */
984 case 4: /* flush entire */
985 tlb_flush(cs);
986 break;
987 default:
988 break;
989 }
990 #ifdef DEBUG_MMU
991 dump_mmu(env);
992 #endif
993 }
994 break;
995 case ASI_M_MMUREGS: /* write MMU regs */
996 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
997 {
998 int reg = (addr >> 8) & 0x1f;
999 uint32_t oldreg;
1000
1001 oldreg = env->mmuregs[reg];
1002 switch (reg) {
1003 case 0: /* Control Register */
1004 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1005 (val & 0x00ffffff);
1006 /* Mappings generated during no-fault mode
1007 are invalid in normal mode. */
1008 if ((oldreg ^ env->mmuregs[reg])
1009 & (MMU_NF | env->def.mmu_bm)) {
1010 tlb_flush(cs);
1011 }
1012 break;
1013 case 1: /* Context Table Pointer Register */
1014 env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
1015 break;
1016 case 2: /* Context Register */
1017 env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
1018 if (oldreg != env->mmuregs[reg]) {
1019 /* we flush when the MMU context changes because
1020 QEMU has no MMU context support */
1021 tlb_flush(cs);
1022 }
1023 break;
1024 case 3: /* Synchronous Fault Status Register with Clear */
1025 case 4: /* Synchronous Fault Address Register */
1026 break;
1027 case 0x10: /* TLB Replacement Control Register */
1028 env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
1029 break;
1030 case 0x13: /* Synchronous Fault Status Register with Read
1031 and Clear */
1032 env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
1033 break;
1034 case 0x14: /* Synchronous Fault Address Register */
1035 env->mmuregs[4] = val;
1036 break;
1037 default:
1038 env->mmuregs[reg] = val;
1039 break;
1040 }
1041 if (oldreg != env->mmuregs[reg]) {
1042 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1043 reg, oldreg, env->mmuregs[reg]);
1044 }
1045 #ifdef DEBUG_MMU
1046 dump_mmu(env);
1047 #endif
1048 }
1049 break;
1050 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
1051 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
1052 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
1053 break;
1054 case ASI_M_TXTC_TAG: /* I-cache tag */
1055 case ASI_M_TXTC_DATA: /* I-cache data */
1056 case ASI_M_DATAC_TAG: /* D-cache tag */
1057 case ASI_M_DATAC_DATA: /* D-cache data */
1058 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
1059 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
1060 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
1061 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
1062 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
1063 break;
1064 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1065 {
1066 MemTxResult result;
1067 hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
1068
1069 switch (size) {
1070 case 1:
1071 address_space_stb(cs->as, access_addr, val,
1072 MEMTXATTRS_UNSPECIFIED, &result);
1073 break;
1074 case 2:
1075 address_space_stw(cs->as, access_addr, val,
1076 MEMTXATTRS_UNSPECIFIED, &result);
1077 break;
1078 case 4:
1079 default:
1080 address_space_stl(cs->as, access_addr, val,
1081 MEMTXATTRS_UNSPECIFIED, &result);
1082 break;
1083 case 8:
1084 address_space_stq(cs->as, access_addr, val,
1085 MEMTXATTRS_UNSPECIFIED, &result);
1086 break;
1087 }
1088 if (result != MEMTX_OK) {
1089 sparc_raise_mmu_fault(cs, access_addr, true, false, false,
1090 size, GETPC());
1091 }
1092 }
1093 break;
1094 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1095 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1096 Turbosparc snoop RAM */
1097 case 0x32: /* store buffer control or Turbosparc page table
1098 descriptor diagnostic */
1099 case 0x36: /* I-cache flash clear */
1100 case 0x37: /* D-cache flash clear */
1101 break;
1102 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1103 {
1104 int reg = (addr >> 8) & 3;
1105
1106 switch (reg) {
1107 case 0: /* Breakpoint Value (Addr) */
1108 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1109 break;
1110 case 1: /* Breakpoint Mask */
1111 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1112 break;
1113 case 2: /* Breakpoint Control */
1114 env->mmubpregs[reg] = (val & 0x7fULL);
1115 break;
1116 case 3: /* Breakpoint Status */
1117 env->mmubpregs[reg] = (val & 0xfULL);
1118 break;
1119 }
1120 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
1121 env->mmuregs[reg]);
1122 }
1123 break;
1124 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1125 env->mmubpctrv = val & 0xffffffff;
1126 break;
1127 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1128 env->mmubpctrc = val & 0x3;
1129 break;
1130 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1131 env->mmubpctrs = val & 0x3;
1132 break;
1133 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1134 env->mmubpaction = val & 0x1fff;
1135 break;
1136 case ASI_USERTXT: /* User code access, XXX */
1137 case ASI_KERNELTXT: /* Supervisor code access, XXX */
1138 default:
1139 sparc_raise_mmu_fault(cs, addr, true, false, asi, size, GETPC());
1140 break;
1141
1142 case ASI_USERDATA: /* User data access */
1143 case ASI_KERNELDATA: /* Supervisor data access */
1144 case ASI_P:
1145 case ASI_M_BYPASS: /* MMU passthrough */
1146 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1147 case ASI_M_BCOPY: /* Block copy, sta access */
1148 case ASI_M_BFILL: /* Block fill, stda access */
1149 /* These are always handled inline. */
1150 g_assert_not_reached();
1151 }
1152 #ifdef DEBUG_ASI
1153 dump_asi("write", addr, asi, size, val);
1154 #endif
1155 }
1156
uint64_t helper_ld_code(CPUSPARCState *env, target_ulong addr, uint32_t oi)
1158 {
1159 MemOp mop = get_memop(oi);
1160 uintptr_t ra = GETPC();
1161 uint64_t ret;
1162
1163 switch (mop & MO_SIZE) {
1164 case MO_8:
1165 ret = cpu_ldb_code_mmu(env, addr, oi, ra);
1166 if (mop & MO_SIGN) {
1167 ret = (int8_t)ret;
1168 }
1169 break;
1170 case MO_16:
1171 ret = cpu_ldw_code_mmu(env, addr, oi, ra);
1172 if ((mop & MO_BSWAP) != MO_TE) {
1173 ret = bswap16(ret);
1174 }
1175 if (mop & MO_SIGN) {
1176 ret = (int16_t)ret;
1177 }
1178 break;
1179 case MO_32:
1180 ret = cpu_ldl_code_mmu(env, addr, oi, ra);
1181 if ((mop & MO_BSWAP) != MO_TE) {
1182 ret = bswap32(ret);
1183 }
1184 if (mop & MO_SIGN) {
1185 ret = (int32_t)ret;
1186 }
1187 break;
1188 case MO_64:
1189 ret = cpu_ldq_code_mmu(env, addr, oi, ra);
1190 if ((mop & MO_BSWAP) != MO_TE) {
1191 ret = bswap64(ret);
1192 }
1193 break;
1194 default:
1195 g_assert_not_reached();
1196 }
1197 return ret;
1198 }
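/*
 * Note: MO_TE is the target's native byte order (big-endian on SPARC), so
 * the bswap*() calls above only run when the requested memop asks for the
 * opposite byte order.
 */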
1199
1200 #endif /* CONFIG_USER_ONLY */
1201 #else /* TARGET_SPARC64 */
1202
1203 #ifdef CONFIG_USER_ONLY
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
1206 {
1207 int size = 1 << (memop & MO_SIZE);
1208 int sign = memop & MO_SIGN;
1209 uint64_t ret = 0;
1210
1211 if (asi < 0x80) {
1212 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1213 }
1214 do_check_align(env, addr, size - 1, GETPC());
1215 addr = asi_address_mask(env, asi, addr);
1216
1217 switch (asi) {
1218 case ASI_PNF: /* Primary no-fault */
1219 case ASI_PNFL: /* Primary no-fault LE */
1220 case ASI_SNF: /* Secondary no-fault */
1221 case ASI_SNFL: /* Secondary no-fault LE */
1222 if (!page_check_range(addr, size, PAGE_READ)) {
1223 ret = 0;
1224 break;
1225 }
1226 switch (size) {
1227 case 1:
1228 ret = cpu_ldub_data(env, addr);
1229 break;
1230 case 2:
1231 ret = cpu_lduw_data(env, addr);
1232 break;
1233 case 4:
1234 ret = cpu_ldl_data(env, addr);
1235 break;
1236 case 8:
1237 ret = cpu_ldq_data(env, addr);
1238 break;
1239 default:
1240 g_assert_not_reached();
1241 }
        break;
1244
1245 case ASI_P: /* Primary */
1246 case ASI_PL: /* Primary LE */
1247 case ASI_S: /* Secondary */
1248 case ASI_SL: /* Secondary LE */
1249 /* These are always handled inline. */
1250 g_assert_not_reached();
1251
1252 default:
1253 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1254 }
1255
1256 /* Convert from little endian */
1257 switch (asi) {
1258 case ASI_PNFL: /* Primary no-fault LE */
1259 case ASI_SNFL: /* Secondary no-fault LE */
1260 switch (size) {
1261 case 2:
1262 ret = bswap16(ret);
1263 break;
1264 case 4:
1265 ret = bswap32(ret);
1266 break;
1267 case 8:
1268 ret = bswap64(ret);
1269 break;
1270 }
1271 }
1272
1273 /* Convert to signed number */
1274 if (sign) {
1275 switch (size) {
1276 case 1:
1277 ret = (int8_t) ret;
1278 break;
1279 case 2:
1280 ret = (int16_t) ret;
1281 break;
1282 case 4:
1283 ret = (int32_t) ret;
1284 break;
1285 }
1286 }
1287 #ifdef DEBUG_ASI
1288 dump_asi("read", addr, asi, size, ret);
1289 #endif
1290 return ret;
1291 }
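/*
 * Example of the no-fault behaviour above: "lduba [%o0] 0x82, %o1"
 * (ASI_PNF, primary no-fault) from an unmapped page simply yields 0,
 * whereas the same access through a faulting ASI would raise an exception.
 */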
1292
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
                   int asi, uint32_t memop)
1295 {
1296 int size = 1 << (memop & MO_SIZE);
1297 #ifdef DEBUG_ASI
1298 dump_asi("write", addr, asi, size, val);
1299 #endif
1300 if (asi < 0x80) {
1301 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1302 }
1303 do_check_align(env, addr, size - 1, GETPC());
1304
1305 switch (asi) {
1306 case ASI_P: /* Primary */
1307 case ASI_PL: /* Primary LE */
1308 case ASI_S: /* Secondary */
1309 case ASI_SL: /* Secondary LE */
1310 /* These are always handled inline. */
1311 g_assert_not_reached();
1312
1313 case ASI_PNF: /* Primary no-fault, RO */
1314 case ASI_SNF: /* Secondary no-fault, RO */
1315 case ASI_PNFL: /* Primary no-fault LE, RO */
1316 case ASI_SNFL: /* Secondary no-fault LE, RO */
1317 default:
1318 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1319 }
1320 }
1321
1322 #else /* CONFIG_USER_ONLY */
1323
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
1326 {
1327 int size = 1 << (memop & MO_SIZE);
1328 int sign = memop & MO_SIGN;
1329 CPUState *cs = env_cpu(env);
1330 uint64_t ret = 0;
1331 #if defined(DEBUG_ASI)
1332 target_ulong last_addr = addr;
1333 #endif
1334
1335 asi &= 0xff;
1336
1337 do_check_asi(env, asi, GETPC());
1338 do_check_align(env, addr, size - 1, GETPC());
1339 addr = asi_address_mask(env, asi, addr);
1340
1341 switch (asi) {
1342 case ASI_PNF:
1343 case ASI_PNFL:
1344 case ASI_SNF:
1345 case ASI_SNFL:
1346 {
1347 MemOpIdx oi;
1348 int idx = (env->pstate & PS_PRIV
1349 ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
1350 : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
1351
1352 if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
1353 #ifdef DEBUG_ASI
1354 dump_asi("read ", last_addr, asi, size, ret);
1355 #endif
1356 /* exception_index is set in get_physical_address_data. */
1357 cpu_raise_exception_ra(env, cs->exception_index, GETPC());
1358 }
1359 oi = make_memop_idx(memop, idx);
1360 switch (size) {
1361 case 1:
1362 ret = cpu_ldb_mmu(env, addr, oi, GETPC());
1363 break;
1364 case 2:
1365 ret = cpu_ldw_mmu(env, addr, oi, GETPC());
1366 break;
1367 case 4:
1368 ret = cpu_ldl_mmu(env, addr, oi, GETPC());
1369 break;
1370 case 8:
1371 ret = cpu_ldq_mmu(env, addr, oi, GETPC());
1372 break;
1373 default:
1374 g_assert_not_reached();
1375 }
1376 }
1377 break;
1378
1379 case ASI_AIUP: /* As if user primary */
1380 case ASI_AIUS: /* As if user secondary */
1381 case ASI_AIUPL: /* As if user primary LE */
1382 case ASI_AIUSL: /* As if user secondary LE */
1383 case ASI_P: /* Primary */
1384 case ASI_S: /* Secondary */
1385 case ASI_PL: /* Primary LE */
1386 case ASI_SL: /* Secondary LE */
1387 case ASI_REAL: /* Bypass */
1388 case ASI_REAL_IO: /* Bypass, non-cacheable */
1389 case ASI_REAL_L: /* Bypass LE */
1390 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1391 case ASI_N: /* Nucleus */
1392 case ASI_NL: /* Nucleus Little Endian (LE) */
1393 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1394 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1395 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1396 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1397 case ASI_TWINX_REAL: /* Real address, twinx */
1398 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1399 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1400 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1401 case ASI_TWINX_N: /* Nucleus, twinx */
1402 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1403 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1404 case ASI_TWINX_P: /* Primary, twinx */
1405 case ASI_TWINX_PL: /* Primary, twinx, LE */
1406 case ASI_TWINX_S: /* Secondary, twinx */
1407 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1408 case ASI_MON_P:
1409 case ASI_MON_S:
1410 case ASI_MON_AIUP:
1411 case ASI_MON_AIUS:
1412 /* These are always handled inline. */
1413 g_assert_not_reached();
1414
1415 case ASI_UPA_CONFIG: /* UPA config */
1416 /* XXX */
1417 break;
1418 case ASI_LSU_CONTROL: /* LSU */
1419 ret = env->lsu;
1420 break;
1421 case ASI_IMMU: /* I-MMU regs */
1422 {
1423 int reg = (addr >> 3) & 0xf;
1424 switch (reg) {
1425 case 0:
1426 /* 0x00 I-TSB Tag Target register */
1427 ret = ultrasparc_tag_target(env->immu.tag_access);
1428 break;
1429 case 3: /* SFSR */
1430 ret = env->immu.sfsr;
1431 break;
1432 case 5: /* TSB access */
1433 ret = env->immu.tsb;
1434 break;
1435 case 6:
1436 /* 0x30 I-TSB Tag Access register */
1437 ret = env->immu.tag_access;
1438 break;
1439 default:
1440 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1441 ret = 0;
1442 }
1443 break;
1444 }
1445 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1446 {
1447 /* env->immuregs[5] holds I-MMU TSB register value
1448 env->immuregs[6] holds I-MMU Tag Access register value */
1449 ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
1450 break;
1451 }
1452 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1453 {
1454 /* env->immuregs[5] holds I-MMU TSB register value
1455 env->immuregs[6] holds I-MMU Tag Access register value */
1456 ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
1457 break;
1458 }
1459 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1460 {
1461 int reg = (addr >> 3) & 0x3f;
1462
1463 ret = env->itlb[reg].tte;
1464 break;
1465 }
1466 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1467 {
1468 int reg = (addr >> 3) & 0x3f;
1469
1470 ret = env->itlb[reg].tag;
1471 break;
1472 }
1473 case ASI_DMMU: /* D-MMU regs */
1474 {
1475 int reg = (addr >> 3) & 0xf;
1476 switch (reg) {
1477 case 0:
1478 /* 0x00 D-TSB Tag Target register */
1479 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1480 break;
1481 case 1: /* 0x08 Primary Context */
1482 ret = env->dmmu.mmu_primary_context;
1483 break;
1484 case 2: /* 0x10 Secondary Context */
1485 ret = env->dmmu.mmu_secondary_context;
1486 break;
1487 case 3: /* SFSR */
1488 ret = env->dmmu.sfsr;
1489 break;
1490 case 4: /* 0x20 SFAR */
1491 ret = env->dmmu.sfar;
1492 break;
1493 case 5: /* 0x28 TSB access */
1494 ret = env->dmmu.tsb;
1495 break;
1496 case 6: /* 0x30 D-TSB Tag Access register */
1497 ret = env->dmmu.tag_access;
1498 break;
1499 case 7:
1500 ret = env->dmmu.virtual_watchpoint;
1501 break;
1502 case 8:
1503 ret = env->dmmu.physical_watchpoint;
1504 break;
1505 default:
1506 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1507 ret = 0;
1508 }
1509 break;
1510 }
1511 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1512 {
1513 /* env->dmmuregs[5] holds D-MMU TSB register value
1514 env->dmmuregs[6] holds D-MMU Tag Access register value */
1515 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
1516 break;
1517 }
1518 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1519 {
1520 /* env->dmmuregs[5] holds D-MMU TSB register value
1521 env->dmmuregs[6] holds D-MMU Tag Access register value */
1522 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
1523 break;
1524 }
1525 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1526 {
1527 int reg = (addr >> 3) & 0x3f;
1528
1529 ret = env->dtlb[reg].tte;
1530 break;
1531 }
1532 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1533 {
1534 int reg = (addr >> 3) & 0x3f;
1535
1536 ret = env->dtlb[reg].tag;
1537 break;
1538 }
1539 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1540 break;
1541 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1542 ret = env->ivec_status;
1543 break;
1544 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1545 {
1546 int reg = (addr >> 4) & 0x3;
1547 if (reg < 3) {
1548 ret = env->ivec_data[reg];
1549 }
1550 break;
1551 }
1552 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1553 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1554 /* Hyperprivileged access only */
1555 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1556 }
1557 /* fall through */
1558 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1559 {
1560 unsigned int i = (addr >> 3) & 0x7;
1561 ret = env->scratch[i];
1562 break;
1563 }
1564 case ASI_MMU: /* UA2005 Context ID registers */
1565 switch ((addr >> 3) & 0x3) {
1566 case 1:
1567 ret = env->dmmu.mmu_primary_context;
1568 break;
1569 case 2:
1570 ret = env->dmmu.mmu_secondary_context;
1571 break;
1572 default:
1573 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1574 }
1575 break;
1576 case ASI_DCACHE_DATA: /* D-cache data */
1577 case ASI_DCACHE_TAG: /* D-cache tag access */
1578 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1579 case ASI_AFSR: /* E-cache asynchronous fault status */
1580 case ASI_AFAR: /* E-cache asynchronous fault address */
1581 case ASI_EC_TAG_DATA: /* E-cache tag data */
1582 case ASI_IC_INSTR: /* I-cache instruction access */
1583 case ASI_IC_TAG: /* I-cache tag access */
1584 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1585 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1586 case ASI_EC_W: /* E-cache tag */
1587 case ASI_EC_R: /* E-cache tag */
1588 break;
1589 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1590 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1591 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1592 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1593 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1594 case ASI_INTR_W: /* Interrupt vector, WO */
1595 default:
1596 sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
1597 ret = 0;
1598 break;
1599 }
1600
1601 /* Convert to signed number */
1602 if (sign) {
1603 switch (size) {
1604 case 1:
1605 ret = (int8_t) ret;
1606 break;
1607 case 2:
1608 ret = (int16_t) ret;
1609 break;
1610 case 4:
1611 ret = (int32_t) ret;
1612 break;
1613 default:
1614 break;
1615 }
1616 }
1617 #ifdef DEBUG_ASI
1618 dump_asi("read ", last_addr, asi, size, ret);
1619 #endif
1620 return ret;
1621 }
1622
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
                   int asi, uint32_t memop)
1625 {
1626 int size = 1 << (memop & MO_SIZE);
1627 CPUState *cs = env_cpu(env);
1628
1629 #ifdef DEBUG_ASI
1630 dump_asi("write", addr, asi, size, val);
1631 #endif
1632
1633 asi &= 0xff;
1634
1635 do_check_asi(env, asi, GETPC());
1636 do_check_align(env, addr, size - 1, GETPC());
1637 addr = asi_address_mask(env, asi, addr);
1638
1639 switch (asi) {
1640 case ASI_AIUP: /* As if user primary */
1641 case ASI_AIUS: /* As if user secondary */
1642 case ASI_AIUPL: /* As if user primary LE */
1643 case ASI_AIUSL: /* As if user secondary LE */
1644 case ASI_P: /* Primary */
1645 case ASI_S: /* Secondary */
1646 case ASI_PL: /* Primary LE */
1647 case ASI_SL: /* Secondary LE */
1648 case ASI_REAL: /* Bypass */
1649 case ASI_REAL_IO: /* Bypass, non-cacheable */
1650 case ASI_REAL_L: /* Bypass LE */
1651 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1652 case ASI_N: /* Nucleus */
1653 case ASI_NL: /* Nucleus Little Endian (LE) */
1654 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1655 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1656 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1657 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1658 case ASI_TWINX_REAL: /* Real address, twinx */
1659 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1660 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1661 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1662 case ASI_TWINX_N: /* Nucleus, twinx */
1663 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1664 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1665 case ASI_TWINX_P: /* Primary, twinx */
1666 case ASI_TWINX_PL: /* Primary, twinx, LE */
1667 case ASI_TWINX_S: /* Secondary, twinx */
1668 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1669 /* These are always handled inline. */
1670 g_assert_not_reached();
1671 /* these ASIs have different functions on UltraSPARC-IIIi
1672 * and UA2005 CPUs. Use the explicit numbers to avoid confusion
1673 */
1674 case 0x31:
1675 case 0x32:
1676 case 0x39:
1677 case 0x3a:
1678 if (cpu_has_hypervisor(env)) {
1679 /* UA2005
1680 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
1681 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
1682 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
1683 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
1684 */
1685 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1686 env->dmmu.sun4v_tsb_pointers[idx] = val;
1687 } else {
1688 goto illegal_insn;
1689 }
1690 break;
1691 case 0x33:
1692 case 0x3b:
1693 if (cpu_has_hypervisor(env)) {
1694 /* UA2005
1695 * ASI_DMMU_CTX_ZERO_CONFIG
1696 * ASI_DMMU_CTX_NONZERO_CONFIG
1697 */
1698 env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1699 } else {
1700 goto illegal_insn;
1701 }
1702 break;
1703 case 0x35:
1704 case 0x36:
1705 case 0x3d:
1706 case 0x3e:
1707 if (cpu_has_hypervisor(env)) {
1708 /* UA2005
1709 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
1710 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
1711 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
1712 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
1713 */
1714 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1715 env->immu.sun4v_tsb_pointers[idx] = val;
1716 } else {
1717 goto illegal_insn;
1718 }
1719 break;
1720 case 0x37:
1721 case 0x3f:
1722 if (cpu_has_hypervisor(env)) {
1723 /* UA2005
1724 * ASI_IMMU_CTX_ZERO_CONFIG
1725 * ASI_IMMU_CTX_NONZERO_CONFIG
1726 */
1727 env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1728 } else {
1729 goto illegal_insn;
1730 }
1731 break;
1732 case ASI_UPA_CONFIG: /* UPA config */
1733 /* XXX */
1734 return;
1735 case ASI_LSU_CONTROL: /* LSU */
1736 env->lsu = val & (DMMU_E | IMMU_E);
1737 return;
1738 case ASI_IMMU: /* I-MMU regs */
1739 {
1740 int reg = (addr >> 3) & 0xf;
1741 uint64_t oldreg;
1742
1743 oldreg = env->immu.mmuregs[reg];
1744 switch (reg) {
1745 case 0: /* RO */
1746 return;
1747 case 1: /* Not in I-MMU */
1748 case 2:
1749 return;
1750 case 3: /* SFSR */
1751 if ((val & 1) == 0) {
1752 val = 0; /* Clear SFSR */
1753 }
1754 env->immu.sfsr = val;
1755 break;
1756 case 4: /* RO */
1757 return;
1758 case 5: /* TSB access */
1759 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1760 PRIx64 "\n", env->immu.tsb, val);
1761 env->immu.tsb = val;
1762 break;
1763 case 6: /* Tag access */
1764 env->immu.tag_access = val;
1765 break;
1766 case 7:
1767 case 8:
1768 return;
1769 default:
1770 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1771 break;
1772 }
1773
1774 if (oldreg != env->immu.mmuregs[reg]) {
1775 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1776 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1777 }
1778 #ifdef DEBUG_MMU
1779 dump_mmu(env);
1780 #endif
1781 return;
1782 }
1783 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1784 /* ignore real translation entries */
1785 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1786 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
1787 val, "immu", env, addr);
1788 }
1789 return;
1790 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1791 {
1792 /* TODO: auto demap */
1793
1794 unsigned int i = (addr >> 3) & 0x3f;
1795
1796 /* ignore real translation entries */
1797 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1798 replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
1799 sun4v_tte_to_sun4u(env, addr, val), env);
1800 }
1801 #ifdef DEBUG_MMU
1802 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1803 dump_mmu(env);
1804 #endif
1805 return;
1806 }
1807 case ASI_IMMU_DEMAP: /* I-MMU demap */
1808 demap_tlb(env->itlb, addr, "immu", env);
1809 return;
1810 case ASI_DMMU: /* D-MMU regs */
1811 {
1812 int reg = (addr >> 3) & 0xf;
1813 uint64_t oldreg;
1814
1815 oldreg = env->dmmu.mmuregs[reg];
1816 switch (reg) {
1817 case 0: /* RO */
1818 case 4:
1819 return;
1820 case 3: /* SFSR */
1821 if ((val & 1) == 0) {
1822 val = 0; /* Clear SFSR, Fault address */
1823 env->dmmu.sfar = 0;
1824 }
1825 env->dmmu.sfsr = val;
1826 break;
1827 case 1: /* Primary context */
1828 env->dmmu.mmu_primary_context = val;
1829 /* can be optimized to only flush MMU_USER_IDX
1830 and MMU_KERNEL_IDX entries */
1831 tlb_flush(cs);
1832 break;
1833 case 2: /* Secondary context */
1834 env->dmmu.mmu_secondary_context = val;
1835 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1836 and MMU_KERNEL_SECONDARY_IDX entries */
1837 tlb_flush(cs);
1838 break;
1839 case 5: /* TSB access */
1840 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1841 PRIx64 "\n", env->dmmu.tsb, val);
1842 env->dmmu.tsb = val;
1843 break;
1844 case 6: /* Tag access */
1845 env->dmmu.tag_access = val;
1846 break;
1847 case 7: /* Virtual Watchpoint */
1848 env->dmmu.virtual_watchpoint = val;
1849 break;
1850 case 8: /* Physical Watchpoint */
1851 env->dmmu.physical_watchpoint = val;
1852 break;
1853 default:
1854 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1855 break;
1856 }
1857
1858 if (oldreg != env->dmmu.mmuregs[reg]) {
1859 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1860 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1861 }
1862 #ifdef DEBUG_MMU
1863 dump_mmu(env);
1864 #endif
1865 return;
1866 }
1867 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1868 /* ignore real translation entries */
1869 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1870 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
1871 val, "dmmu", env, addr);
1872 }
1873 return;
1874 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1875 {
1876 unsigned int i = (addr >> 3) & 0x3f;
1877
1878 /* ignore real translation entries */
1879 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1880 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
1881 sun4v_tte_to_sun4u(env, addr, val), env);
1882 }
1883 #ifdef DEBUG_MMU
1884 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1885 dump_mmu(env);
1886 #endif
1887 return;
1888 }
1889 case ASI_DMMU_DEMAP: /* D-MMU demap */
1890 demap_tlb(env->dtlb, addr, "dmmu", env);
1891 return;
1892 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1893 env->ivec_status = val & 0x20;
1894 return;
1895 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1896 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1897 /* Hyperprivileged access only */
1898 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1899 }
1900 /* fall through */
1901 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1902 {
1903 unsigned int i = (addr >> 3) & 0x7;
1904 env->scratch[i] = val;
1905 return;
1906 }
1907 case ASI_MMU: /* UA2005 Context ID registers */
1908 {
1909 switch ((addr >> 3) & 0x3) {
1910 case 1:
1911 env->dmmu.mmu_primary_context = val;
1912 env->immu.mmu_primary_context = val;
1913 tlb_flush_by_mmuidx(cs,
1914 (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
1915 break;
1916 case 2:
1917 env->dmmu.mmu_secondary_context = val;
1918 env->immu.mmu_secondary_context = val;
1919 tlb_flush_by_mmuidx(cs,
1920 (1 << MMU_USER_SECONDARY_IDX) |
1921 (1 << MMU_KERNEL_SECONDARY_IDX));
1922 break;
1923 default:
1924 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1925 }
1926 }
1927 return;
1928 case ASI_QUEUE: /* UA2005 CPU mondo queue */
1929 case ASI_DCACHE_DATA: /* D-cache data */
1930 case ASI_DCACHE_TAG: /* D-cache tag access */
1931 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1932 case ASI_AFSR: /* E-cache asynchronous fault status */
1933 case ASI_AFAR: /* E-cache asynchronous fault address */
1934 case ASI_EC_TAG_DATA: /* E-cache tag data */
1935 case ASI_IC_INSTR: /* I-cache instruction access */
1936 case ASI_IC_TAG: /* I-cache tag access */
1937 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1938 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1939 case ASI_EC_W: /* E-cache tag */
1940 case ASI_EC_R: /* E-cache tag */
1941 return;
1942 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
1943 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
1944 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
1945 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
1946 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
1947 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
1948 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
1949 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1950 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1951 case ASI_PNF: /* Primary no-fault, RO */
1952 case ASI_SNF: /* Secondary no-fault, RO */
1953 case ASI_PNFL: /* Primary no-fault LE, RO */
1954 case ASI_SNFL: /* Secondary no-fault LE, RO */
1955 default:
1956 sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
1957 return;
1958 illegal_insn:
1959 cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
1960 }
1961 }
1962 #endif /* CONFIG_USER_ONLY */
1963 #endif /* TARGET_SPARC64 */
1964
1965 #if !defined(CONFIG_USER_ONLY)
1966
void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
1972 {
1973 bool is_write = access_type == MMU_DATA_STORE;
1974 bool is_exec = access_type == MMU_INST_FETCH;
1975 bool is_asi = false;
1976
1977 sparc_raise_mmu_fault(cs, physaddr, is_write, is_exec,
1978 is_asi, size, retaddr);
1979 }
1980 #endif
1981