/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"

#include "cpu.h"
#include "internal.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "exec/helper-proto.h"

/* TLB management */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

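/*
 * Worked example (for illustration only): on 32-bit targets the PFN field
 * occupies EntryLo bits 29:6 and, with XPA, PFNX occupies bits 63:32.
 * Assuming entrylo = 0x0000000300000040, PFN = 0x40 >> 6 = 1 and PFNX = 3,
 * so the function below returns 1 | (3 << 24) = 0x03000001.
 */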
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}

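/*
 * A sketch of the mask arithmetic in r4k_fill_tlb(), assuming 4 KiB target
 * pages (TARGET_PAGE_BITS = 12): for 16 KiB guest pages CP0_PageMask is
 * 0x6000, so mask = 0x6000 >> 13 = 3, and `& ~mask` clears the two low
 * PFN bits, aligning each half of the entry's page pair to a 16 KiB frame.
 */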
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise an MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}

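/*
 * When Config5.MI is set, the CPU tags TLB entries with the 32-bit
 * MemoryMapID (MMID) instead of the ASID; the helpers below fold both
 * cases into a single comparison by zero-extending the ASID into the
 * MMID variable when MI is clear.
 */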
static void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

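/*
 * For example (illustration only): re-writing the same VPN/MMID pair with
 * the D0 bit newly set merely upgrades permissions, so the shadow entries
 * QEMU keeps beyond nb_tlb stay valid; changing the VPN, clearing a V bit,
 * or setting an RI/XI bit makes a previously-allowed access invalid, so
 * the shadow entries must be discarded.
 */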
static void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}

static void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}

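/*
 * A worked example of the probe masking, assuming 4 KiB target pages:
 * for a 16 KiB guest page size, tlb->PageMask is 0x6000 and
 * ~(TARGET_PAGE_MASK << 1) is 0x1fff, so mask = 0x7fff and the VPN/tag
 * comparison is done at the granularity of the 32 KiB even/odd page pair.
 */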
static void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match; discard any shadow entries that do match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

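/*
 * Inverse of get_tlb_pfn_from_entrylo(): repacks a linear frame number
 * into the EntryLo PFN (and, on 32-bit targets, PFNX) field layout.
 * Round-trip example for illustration: 0x03000001 unpacks to PFN = 1,
 * PFNX = 3, and repacks to (1 << 6) | (3ULL << 32).
 */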
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}

static void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}

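/*
 * An entry is invalidated when any of the requested predicates holds:
 * invAll hits every non-wired entry, invVA needs a VPN (and, on MIPS64,
 * region) match, invMMid needs an MMID match on a non-global entry, and
 * invVAMMid needs the VA match plus either the global bit or an MMID
 * match.
 */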
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{
    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}

/* no MMU emulation */
static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                              target_ulong address, MMUAccessType access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* MIPS32/MIPS64 R4000-style MMU emulation */
static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, MMUAccessType access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
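            /*
             * 'n' selects the even (0) or odd (1) page of the pair: the
             * tested bit is the lowest VPN bit not covered by the page
             * size.  E.g. with 4 KiB pages (for illustration), mask is
             * 0x1fff and mask & ~(mask >> 1) is 0x1000, so address bit 12
             * picks PFN[0] versus PFN[1].
             */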
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;
}

static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;
}

static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
}

void mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));

    switch (def->mmu_type) {
    case MMU_TYPE_NONE:
        no_mmu_init(env, def);
        break;
    case MMU_TYPE_R4000:
        r4k_mmu_init(env, def);
        break;
    case MMU_TYPE_FMT:
        fixed_mmu_init(env, def);
        break;
    case MMU_TYPE_R3000:
    case MMU_TYPE_R6000:
    case MMU_TYPE_R8000:
    default:
        cpu_abort(env_cpu(env), "MMU type not supported\n");
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

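/*
 * For reference when reading the CP0_Context update below: the shift and
 * mask place BadVPN2 (bits 31:13 of the faulting address) into Context
 * bits 22:4, e.g. a fault at 0x7fff6000 yields a BadVPN2 field of
 * (0x7fff6000 >> 9) & 0x007ffff0 = 0x003fffb0.
 */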
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (access_type == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R       */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}

#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is raised by the AddressTranslation or
 * LoadMemory steps, the exception is not taken; the walk exits silently
 * instead, resulting in a TLB or XTLB Refill exception.
 */

static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op,
                    uint64_t *pte, unsigned ptw_mmu_idx)
{
    MemOpIdx oi;

    if ((vaddr & (memop_size(op) - 1)) != 0) {
        return false;
    }

    oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx);
    if (op == MO_64) {
        *pte = cpu_ldq_mmu(env, vaddr, oi, 0);
    } else {
        *pte = cpu_ldl_mmu(env, vaddr, oi, 0);
    }

    return true;
}

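/*
 * Repack a software PTE into EntryLo layout: the two RI/XI bits sit at
 * positions ptei-1:ptei-2 of the raw entry and are moved to the
 * architected positions at CP0EnLo_RI/CP0EnLo_XI, while the rest of the
 * entry is shifted down past them.  E.g. (for illustration) with
 * ptei = 2, bits 1:0 of the entry become bits 63:62 of the result and
 * the remaining PFN/flag bits move down by two.
 */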
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     MemOp op, int ptei)
{
    unsigned entry_size = memop_size(op) << 3;
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}

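/*
 * Walk one directory level.  Returns 0 if the walk must be aborted
 * silently, 1 if a huge-page leaf was found (pw_entrylo0/pw_entrylo1
 * are filled in), or 2 if the fetched entry is a pointer to the next
 * level (*vaddr is updated with it).
 */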
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
        MemOp directory_mop, MemOp leaf_mop, int ptw_mmu_idx)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, directory_mop, &entry, ptw_mmu_idx)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_mop;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ptw_mmu_idx) != TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leaf_mop, &entry, ptw_mmu_idx)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}

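/*
 * Refill the TLB from the in-memory page table: descend through up to
 * three directory levels (global, upper, middle) starting at CP0_PWBase,
 * read the even/odd PTE pair at the leaf, then insert the pair exactly
 * as a TLBWR from the software refill handler would.
 */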
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
                                   int ptw_mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For the 32-bit architectures, this bit is fixed to 0. */
    MemOp native_op = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? MO_32 : MO_64;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    MemOp directory_mop, leaf_mop;

    /* Offsets into tables */
    unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    int m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if (ptew > 1) {
        return false;
    }

    /* HTW Shift values (depend on entry size) */
    directory_mop = (hugepg && (ptew == 1)) ? native_op + 1 : native_op;
    leaf_mop = (ptew == 1) ? native_op + 1 : native_op;

    goffset = gindex << directory_mop;
    uoffset = uindex << directory_mop;
    moffset = mindex << directory_mop;
    ptoffset0 = (ptindex >> 1) << (leaf_mop + 1);
    ptoffset1 = ptoffset0 | (1 << (leaf_mop));

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_mop, leaf_mop, ptw_mmu_idx))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ptw_mmu_idx) != TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> TARGET_PAGE_BITS;
    pw_pagemask = compute_pagemask(pw_pagemask << CP0PM_MASK);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif

bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    CPUMIPSState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret = TLBRET_BADADDR;

    /* data access */
    /* XXX: put correct access by using cpu_restore_state() correctly */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ?
                           MMU_ERL_IDX : MMU_KERNEL_IDX);

        if (page_table_walk_refill(env, address, ptw_mmu_idx)) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}

hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type, uintptr_t retaddr)
{
    hwaddr physical;
    int prot;
    int ret = 0;
    CPUState *cs = env_cpu(env);

    /* data access */
    ret = get_physical_address(env, &physical, &prot, address, access_type,
                               mips_env_mmu_index(env));
    if (ret == TLBRET_MATCH) {
        return physical;
    }

    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

void mips_cpu_do_interrupt(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC,
                 mips_exception_name(cs->exception_index));
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_SEMIHOST:
        cs->exception_index = EXCP_NONE;
        mips_semihosting(env);
        env->active_tc.PC += env->error_code;
        return;
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
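                /*
                 * IntCtl.VS is in units of 32 bytes, so e.g. VS = 1
                 * spaces the vectors 0x20 apart: vector 2 then lands
                 * at offset 0x200 + 0x40 = 0x240.
                 */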
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS_R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPUMIPSState *env = cpu_env(cs);

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}