/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"
#include "tcg/tcg_loongarch.h"

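/*
 * Page-size sanity check: CSR.PRCFG2 is a bitmap of the page sizes
 * supported by the implementation, so a requested tlb_ps is valid
 * only if the corresponding bit is set.
 */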
bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
    if (tlb_ps >= 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                        VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                        VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

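/*
 * Flush the softmmu TLB mappings covered by one guest TLB entry.  Each
 * entry maps an adjacent even/odd page pair, so whichever half is valid
 * is flushed separately from QEMU's TLB.
 */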
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) | pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

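/*
 * Write one TLB entry from the CSRs.  Inside a TLB refill exception
 * (CSR.TLBRERA.ISTLBR set) the TLBREHI/TLBRELO0/TLBRELO1 registers are
 * the source; otherwise the normal TLBEHI/TLBELO0/TLBELO1 set is used.
 */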
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* Check csr_ps */
    if (!check_ps(env, csr_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
        return;
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value between low and high */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

/*
 * One TLB entry holds an adjacent odd/even page pair, so the compared
 * vpn is the virtual page number divided by 2.  For a 16KiB page the
 * compare vpn is bit[47:15], while the VPPN field in the TLB entry
 * contains bit[47:13], so it needs to be adjusted.
 * virt_vpn = vaddr[47:13]
 */
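/*
 * For example, with a 16KiB STLB page size (stlb_ps = 14):
 * vpn = vaddr >> 15 and compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT
 * = 2, so the stored VPPN is shifted right by 2 before the comparison
 * below.
 */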
static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                                 int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID  = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                                     R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

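/*
 * TLBFILL: when the page size in use equals the STLB page size, the
 * entry goes into a randomly chosen STLB set at the index derived from
 * the virtual address; any other page size can only live in the MTLB,
 * so a random MTLB slot is picked instead.
 */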
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    if (!check_ps(env, pagesize)) {
        qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
        return;
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (!check_ps(env, stlb_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
        return;
    }

    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

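/*
 * The helper_invtlb_* functions below back the INVTLB instruction
 * variants: they clear the E bit of the matching guest TLB entries and
 * then flush QEMU's softmmu TLB wholesale via tlb_flush().
 */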
void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

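/*
 * TCG tlb_fill hook.  On a translation hit the mapping is installed
 * into the softmmu TLB with TARGET_PAGE_SIZE granularity; otherwise the
 * corresponding guest exception is raised, unless this is only a probe.
 */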
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

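/*
 * LDDIR: walk one directory level of the page table for the address in
 * CSR_TLBRBADV, using the directory base/width that get_dir_base_width()
 * reports for @level, and return the next-level entry.  Huge page
 * entries are returned (tagged with their level) instead of being
 * dereferenced.
 */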
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}

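/*
 * LDPTE: load the even/odd page table entry selected by @odd into
 * CSR_TLBRELO0/CSR_TLBRELO1 for the refill address in CSR_TLBRBADV, and
 * record the page size in CSR_TLBREHI.PS.  A huge page entry is split
 * into the two halves expected by the TLB instead of being fetched from
 * memory.
 */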
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The "base" parameter is one of two things: either a page table
     * base address, whose bit 6 is 0, or a huge page entry, whose
     * bit 6 is 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and the huge page size, then clear
         * the level information and the huge page bit in the entry,
         * and move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * A huge page is split evenly into an odd/even page pair when
         * it is loaded into the TLB, so the TLB page size is half of
         * the huge page size.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}

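/*
 * Translate through one matched TLB entry: pick the even or odd half,
 * check the V/NX/NR/PLV/D permission bits against the access type and
 * privilege level, and on success return the physical address and the
 * QEMU page protection flags.
 */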
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove sw bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}
758