/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "qemu/guest-random.h"

#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"
#include "tcg/tcg_loongarch.h"

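/*
 * CSR.PRCFG2 is a bitmap of the supported page sizes: bit n set means
 * pages of 2^n bytes are implemented, so a PS value of 64 or more can
 * never be valid.
 */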
bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
    if (tlb_ps >= 64) {
        return false;
    }
    return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}

static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}

static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
    uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        return;
    }
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
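    /*
     * One TLB entry maps an even/odd page pair: 'pagesize' is the size
     * of a single page (1 << tlb_ps) and 'mask' covers the whole pair.
     */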
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}

static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb = &env->tlb[index];
    tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
    if (tlb_g == 0 && tlb_asid != csr_asid) {
        return;
    }
    invalidate_tlb_entry(env, index);
}

static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t lo0, lo1, csr_vppn;
    uint16_t csr_asid;
    uint8_t csr_ps;

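    /*
     * When TLBRERA.ISTLBR is set we are in TLB-refill exception context,
     * so the new entry comes from the TLBREHI/TLBRELO* CSRs; otherwise
     * the regular TLBEHI/TLBELO* CSRs are used.
     */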
    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        }
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        if (is_la64(env)) {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
        } else {
            csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        }
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    /* Check csr_ps */
    if (!check_ps(env, csr_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
        return;
    }

    /* Only MTLB entries have the PS field */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);

    tlb->tlb_entry0 = lo0;
    tlb->tlb_entry1 = lo1;
}

/* Return a random value in [low, high] */
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
{
    uint32_t val;

    qemu_guest_getrandom_nofail(&val, sizeof(val));
    return val % (high - low + 1) + low;
}

/*
 * One TLB entry holds an adjacent odd/even page pair, so the VPN used
 * for comparison is the virtual page number divided by 2.  For a 16KiB
 * page the compare VPN is therefore bits [47:15] of the address, while
 * the VPPN field stored in the TLB entry holds bits [47:13], so it has
 * to be shifted before the comparison.
 * virt_vpn = vaddr[47:13]
 */
static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                                 int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
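    /* 8 ways, each holding 256 entries selected by stlb_idx */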
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

void helper_tlbsrch(CPULoongArchState *env)
{
    int index, match;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
    } else {
        match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
    }

    if (match) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        return;
    }

    env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
}

void helper_tlbrd(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int index;
    uint8_t tlb_ps, tlb_e;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    tlb = &env->tlb[index];

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid TLB entry */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}

void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    invalidate_tlb(env, index);

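    /* TLBIDX.NE set means "no valid entry": just mark the slot invalid. */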
    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
                                              TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}

void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t address, entryhi;
    int index, set, stlb_idx;
    uint16_t pagesize, stlb_ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    if (!check_ps(env, pagesize)) {
        qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
        return;
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    if (!check_ps(env, stlb_ps)) {
        qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
        return;
    }

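    /*
     * Entries whose page size matches CSR.STLBPS go into the
     * set-associative STLB; any other page size must use the
     * fully-associative MTLB.
     */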
    if (pagesize == stlb_ps) {
        /* Only write into STLB bits [47:13] */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one set randomly */
        set = get_random_tlb(0, 7);

        /* Index in one set */
        stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */

        index = set * 256 + stlb_idx;
    } else {
        /* Only write into MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}

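/*
 * TLBCLR invalidates non-global entries whose ASID matches CSR.ASID:
 * either one STLB line (all 8 ways) or the whole MTLB, depending on
 * CSR.TLBIDX.Index.
 */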
void helper_tlbclr(CPULoongArchState *env)
{
    LoongArchTLB *tlb;
    int i, index;
    uint16_t csr_asid, tlb_asid, tlb_g;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            tlb = &env->tlb[i * 256 + (index % 256)];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            tlb = &env->tlb[i];
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            if (!tlb_g && tlb_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}

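/*
 * TLBFLUSH unconditionally invalidates one STLB line (all 8 ways) or
 * the whole MTLB, depending on CSR.TLBIDX.Index.
 */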
void helper_tlbflush(CPULoongArchState *env)
{
    int i, index;

    index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    if (index < LOONGARCH_STLB) {
        /* STLB. One line per operation */
        for (i = 0; i < 8; i++) {
            int s_idx = i * 256 + (index % 256);
            env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
                                                  TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* All MTLB entries */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                              TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}

void helper_invtlb_all(CPULoongArchState *env)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

        if (tlb_g == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!tlb_g && (tlb_asid == asid)) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
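    /* Only the low 10 bits of the info operand carry the ASID. */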
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    uint16_t asid = info & 0x3ff;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;
        uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

        if (!tlb_e) {
            continue;
        }
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}

bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx, 0);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
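    /* In probe mode just report failure; otherwise raise the MMU exception. */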
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys;
    uint64_t dir_base, dir_width;

    if (unlikely((level == 0) || (level > 4))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
            return base;
        }

        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

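    /*
     * Walk one level of the page table: select the directory entry for
     * the faulting address; each entry is 8 bytes wide.
     */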
    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;
    get_dir_base_width(env, &dir_base, &dir_width, level);
    index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
    phys = base | index << 3;
    return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}

void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The parameter "base" has only two forms: either it is a page table
     * base address, whose bit 6 is 0, or it is a huge page entry, whose
     * bit 6 is 1.
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Get the huge page level and the huge page size.
         * Clear the huge page level information in the entry.
         * Clear the huge page bit.
         * Move the HGLOBAL bit to the GLOBAL bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        ps = dir_base + dir_width - 1;
        /*
         * Huge pages are evenly split into an even/odd page pair when
         * loaded into the TLB, so the TLB page size is half the huge
         * page size.
         */
        tmp0 = base;
        if (odd) {
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1; /* clear bit 0 */
        ptoffset0 = ptindex << 3;
        ptoffset1 = (ptindex + 1) << 3;
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}

static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove software bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

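    /*
     * Compose the physical address: PPN (shifted up by 12, aligned to the
     * page size) plus the low PS offset bits of the virtual address.
     */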
    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}