1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "system/kvm.h"
24 #include "kvm_ppc.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/cputlb.h"
28 #include "exec/page-protection.h"
29 #include "exec/target_page.h"
30 #include "exec/log.h"
31 #include "helper_regs.h"
32 #include "qemu/error-report.h"
33 #include "qemu/qemu-print.h"
34 #include "internal.h"
35 #include "mmu-book3s-v3.h"
36 #include "mmu-radix64.h"
37 #include "mmu-booke.h"
38 #include "exec/helper-proto.h"
39 #include "accel/tcg/cpu-ldst.h"
40
41 /* #define FLUSH_ALL_TLBS */
42
43 /*****************************************************************************/
44 /* PowerPC MMU emulation */
45
46 /* Software driven TLB helpers */
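/*
 * The 6xx software-managed TLB is modelled as a flat tlb6[] array.  As
 * assumed here (the indexing itself lives in ppc6xx_tlb_getnum()), the
 * first nb_tlb entries hold data translations and the following nb_tlb
 * entries hold instruction translations, hence the 2 * env->nb_tlb
 * bound used when invalidating everything.
 */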
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
48 {
49 ppc6xx_tlb_t *tlb;
50 int nr, max = 2 * env->nb_tlb;
51
52 for (nr = 0; nr < max; nr++) {
53 tlb = &env->tlb.tlb6[nr];
54 pte_invalidate(&tlb->pte0);
55 }
56 tlb_flush(env_cpu(env));
57 }
58
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
62 {
63 #if !defined(FLUSH_ALL_TLBS)
64 CPUState *cs = env_cpu(env);
65 ppc6xx_tlb_t *tlb;
66 int way, nr;
67
68 /* Invalidate ITLB + DTLB, all ways */
69 for (way = 0; way < env->nb_ways; way++) {
70 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
71 tlb = &env->tlb.tlb6[nr];
72 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
73 qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
74 TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
75 pte_invalidate(&tlb->pte0);
76 tlb_flush_page(cs, tlb->EPN);
77 }
78 }
79 #else
    /* XXX: the PowerPC specification says this is also valid */
81 ppc6xx_tlb_invalidate_all(env);
82 #endif
83 }
84
static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
87 {
88 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
89 }
90
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
93 {
94 ppc6xx_tlb_t *tlb;
95 int nr;
96
97 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
98 tlb = &env->tlb.tlb6[nr];
99 qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
100 TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
101 EPN, pte0, pte1);
102 /* Invalidate any pending reference in QEMU for this virtual address */
103 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
104 tlb->pte0 = pte0;
105 tlb->pte1 = pte1;
106 tlb->EPN = EPN;
107 /* Store last way for LRU mechanism */
108 env->last_way = way;
109 }
110
111 /* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
113 {
114 ppcemb_tlb_t *tlb;
115 int i;
116
117 for (i = 0; i < env->nb_tlb; i++) {
118 tlb = &env->tlb.tlbe[i];
119 tlb->prot &= ~PAGE_VALID;
120 }
121 tlb_flush(env_cpu(env));
122 }
123
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
126 {
127 int tlb_size;
128 int i, j;
129 ppcmas_tlb_t *tlb = env->tlb.tlbm;
130
131 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
132 if (flags & (1 << i)) {
133 tlb_size = booke206_tlb_size(env, i);
134 for (j = 0; j < tlb_size; j++) {
135 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
136 tlb[j].mas1 &= ~MAS1_VALID;
137 }
138 }
139 }
140 tlb += booke206_tlb_size(env, i);
141 }
142
143 tlb_flush(env_cpu(env));
144 }
145
146 /*****************************************************************************/
147 /* BATs management */
148 #if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
151 {
152 CPUState *cs = env_cpu(env);
153 target_ulong base, end, page;
154
155 base = BATu & ~0x0001FFFF;
156 end = base + mask + 0x00020000;
157 if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
158 /* Flushing 1024 4K pages is slower than a complete flush */
159 qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
160 tlb_flush(cs);
161 qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
162 return;
163 }
164 qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
165 " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
166 base, end, mask);
167 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
168 tlb_flush_page(cs, page);
169 }
170 qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
171 }
172 #endif
173
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
176 {
177 qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
178 TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
179 value, env->nip);
180 }
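/*
 * A BAT upper word holds BEPI in its high bits and the block length (BL)
 * field in bits 0x00001FFC.  (value << 15) & 0x0FFE0000 turns BL into the
 * address mask covered by the block; as an illustration, BL = 0x03F (an
 * 8 MB block) gives a mask of 0x007E0000 and BL = 0x7FF (256 MB) gives
 * 0x0FFE0000.  do_invalidate_BAT() then flushes base .. base + mask +
 * 128 KB for the affected mapping.
 */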
181
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
183 {
184 target_ulong mask;
185
186 dump_store_bat(env, 'I', 0, nr, value);
187 if (env->IBAT[0][nr] != value) {
188 mask = (value << 15) & 0x0FFE0000UL;
189 #if !defined(FLUSH_ALL_TLBS)
190 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
191 #endif
192 /*
193 * When storing valid upper BAT, mask BEPI and BRPN and
194 * invalidate all TLBs covered by this BAT
195 */
196 mask = (value << 15) & 0x0FFE0000UL;
197 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
198 (value & ~0x0001FFFFUL & ~mask);
199 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
200 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
201 #if !defined(FLUSH_ALL_TLBS)
202 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
203 #else
204 tlb_flush(env_cpu(env));
205 #endif
206 }
207 }
208
void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
210 {
211 dump_store_bat(env, 'I', 1, nr, value);
212 env->IBAT[1][nr] = value;
213 }
214
void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
216 {
217 target_ulong mask;
218
219 dump_store_bat(env, 'D', 0, nr, value);
220 if (env->DBAT[0][nr] != value) {
221 /*
222 * When storing valid upper BAT, mask BEPI and BRPN and
223 * invalidate all TLBs covered by this BAT
224 */
225 mask = (value << 15) & 0x0FFE0000UL;
226 #if !defined(FLUSH_ALL_TLBS)
227 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
228 #endif
229 mask = (value << 15) & 0x0FFE0000UL;
230 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
231 (value & ~0x0001FFFFUL & ~mask);
232 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
233 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
234 #if !defined(FLUSH_ALL_TLBS)
235 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
236 #else
237 tlb_flush(env_cpu(env));
238 #endif
239 }
240 }
241
void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
243 {
244 dump_store_bat(env, 'D', 1, nr, value);
245 env->DBAT[1][nr] = value;
246 }
247
248 /*****************************************************************************/
249 /* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
251 {
252 #if defined(TARGET_PPC64)
253 if (mmu_is_64bit(env->mmu_model)) {
254 env->tlb_need_flush = 0;
255 tlb_flush(env_cpu(env));
256 } else
257 #endif /* defined(TARGET_PPC64) */
258 switch (env->mmu_model) {
259 case POWERPC_MMU_SOFT_6xx:
260 ppc6xx_tlb_invalidate_all(env);
261 break;
262 case POWERPC_MMU_SOFT_4xx:
263 ppc4xx_tlb_invalidate_all(env);
264 break;
265 case POWERPC_MMU_REAL:
266 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
267 break;
268 case POWERPC_MMU_MPC8xx:
269 /* XXX: TODO */
270 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
271 break;
272 case POWERPC_MMU_BOOKE:
273 tlb_flush(env_cpu(env));
274 break;
275 case POWERPC_MMU_BOOKE206:
276 booke206_flush_tlb(env, -1, 0);
277 break;
278 case POWERPC_MMU_32B:
279 env->tlb_need_flush = 0;
280 tlb_flush(env_cpu(env));
281 break;
282 default:
283 /* XXX: TODO */
284 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
285 break;
286 }
287 }
288
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
290 {
291 #if !defined(FLUSH_ALL_TLBS)
292 addr &= TARGET_PAGE_MASK;
293 #if defined(TARGET_PPC64)
294 if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
296 /*
297 * XXX: given the fact that there are too many segments to invalidate,
298 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
299 * we just invalidate all TLBs
300 */
301 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
302 } else
303 #endif /* defined(TARGET_PPC64) */
304 switch (env->mmu_model) {
305 case POWERPC_MMU_SOFT_6xx:
306 ppc6xx_tlb_invalidate_virt(env, addr, 0);
307 ppc6xx_tlb_invalidate_virt(env, addr, 1);
308 break;
309 case POWERPC_MMU_32B:
310 /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account; we just mark the TLB to be flushed later (at the
         * next context-synchronizing event or sync instruction on 32-bit).
315 */
316 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
317 break;
318 default:
319 /* Should never reach here with other MMU models */
320 g_assert_not_reached();
321 }
322 #else
323 ppc_tlb_invalidate_all(env);
324 #endif
325 }
326
327 /*****************************************************************************/
328 /* Special registers manipulation */
329
330 /* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
332 {
333 #if defined(TARGET_PPC64)
334 if (mmu_is_64bit(env->mmu_model)) {
335 /* XXX */
336 return 0;
337 }
338 #endif
339 return env->sr[sr_num];
340 }
341
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
343 {
344 qemu_log_mask(CPU_LOG_MMU,
345 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
346 (int)srnum, value, env->sr[srnum]);
347 #if defined(TARGET_PPC64)
348 if (mmu_is_64bit(env->mmu_model)) {
349 PowerPCCPU *cpu = env_archcpu(env);
350 uint64_t esid, vsid;
351
352 /* ESID = srnum */
353 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
354
355 /* VSID = VSID */
356 vsid = (value & 0xfffffff) << 12;
357 /* flags = flags */
358 vsid |= ((value >> 27) & 0xf) << 8;
359
360 ppc_store_slb(cpu, srnum, esid, vsid);
361 } else
362 #endif
363 if (env->sr[srnum] != value) {
364 env->sr[srnum] = value;
365 /*
         * Invalidating 256 MB of virtual memory in 4 kB pages takes far
         * longer than flushing the whole TLB.
368 */
369 #if !defined(FLUSH_ALL_TLBS) && 0
370 {
371 target_ulong page, end;
372 /* Invalidate 256 MB of virtual memory */
373 page = (16 << 20) * srnum;
374 end = page + (16 << 20);
375 for (; page != end; page += TARGET_PAGE_SIZE) {
376 tlb_flush_page(env_cpu(env), page);
377 }
378 }
379 #else
380 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
381 #endif
382 }
383 }
384
385 /* TLB management */
void helper_tlbia(CPUPPCState *env)
387 {
388 ppc_tlb_invalidate_all(env);
389 }
390
void helper_tlbie(CPUPPCState *env, target_ulong addr)
392 {
393 ppc_tlb_invalidate_one(env, addr);
394 }
395
396 #if defined(TARGET_PPC64)
397
398 /* Invalidation Selector */
399 #define TLBIE_IS_VA 0
400 #define TLBIE_IS_PID 1
401 #define TLBIE_IS_LPID 2
402 #define TLBIE_IS_ALL 3
403
404 /* Radix Invalidation Control */
405 #define TLBIE_RIC_TLB 0
406 #define TLBIE_RIC_PWC 1
407 #define TLBIE_RIC_ALL 2
408 #define TLBIE_RIC_GRP 3
409
410 /* Radix Actual Page sizes */
411 #define TLBIE_R_AP_4K 0
412 #define TLBIE_R_AP_64K 5
413 #define TLBIE_R_AP_2M 1
414 #define TLBIE_R_AP_1G 2
415
416 /* RB field masks */
417 #define TLBIE_RB_EPN_MASK PPC_BITMASK(0, 51)
418 #define TLBIE_RB_IS_MASK PPC_BITMASK(52, 53)
419 #define TLBIE_RB_AP_MASK PPC_BITMASK(56, 58)
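/*
 * Sketch of the RB decode done by helper_tlbie_isa300() below: IS selects
 * the invalidation scope and AP the actual page size.  For instance,
 * invalidating a single 64 KB radix page uses IS = TLBIE_IS_VA with
 * AP = TLBIE_R_AP_64K, and the flush address becomes
 * rb & TLBIE_RB_EPN_MASK & ~0xffff.
 */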
420
void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
423 {
424 unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
425 /*
426 * With the exception of the checks for invalid instruction forms,
427 * PRS is currently ignored, because we don't know if a given TLB entry
428 * is process or partition scoped.
429 */
430 bool prs = flags & TLBIE_F_PRS;
431 bool r = flags & TLBIE_F_R;
432 bool local = flags & TLBIE_F_LOCAL;
433 bool effR;
434 unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
435 unsigned ap; /* actual page size */
436 target_ulong addr, pgoffs_mask;
437
438 qemu_log_mask(CPU_LOG_MMU,
439 "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
440 __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);
441
442 effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;
443
444 /* Partial TLB invalidation is supported for Radix only for now. */
445 if (!effR) {
446 goto inval_all;
447 }
448
449 /* Check for invalid instruction forms (effR=1). */
450 if (unlikely(ric == TLBIE_RIC_GRP ||
451 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
452 is == TLBIE_IS_VA) ||
453 (!prs && is == TLBIE_IS_PID))) {
454 qemu_log_mask(LOG_GUEST_ERROR,
455 "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
456 __func__, ric, prs, r, is);
457 goto invalid;
458 }
459
460 /* We don't cache Page Walks. */
461 if (ric == TLBIE_RIC_PWC) {
462 if (local) {
463 unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
464 if (set != 0) {
465 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
466 __func__, set);
467 goto invalid;
468 }
469 }
470 return;
471 }
472
473 /*
     * Invalidation by LPID or PID is not supported, so fall back
     * to a full TLB flush in these cases.
476 */
477 if (is != TLBIE_IS_VA) {
478 goto inval_all;
479 }
480
481 /*
482 * The results of an attempt to invalidate a translation outside of
483 * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
484 * and EA 0:1 != 0b00) are boundedly undefined.
485 */
486 if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
487 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
488 qemu_log_mask(LOG_GUEST_ERROR,
489 "%s: attempt to invalidate a translation outside of quadrant 0\n",
490 __func__);
491 goto inval_all;
492 }
493
494 assert(is == TLBIE_IS_VA);
495 assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);
496
497 ap = extract64(rb, PPC_BIT_NR(58), 3);
498 switch (ap) {
499 case TLBIE_R_AP_4K:
500 pgoffs_mask = 0xfffull;
501 break;
502
503 case TLBIE_R_AP_64K:
504 pgoffs_mask = 0xffffull;
505 break;
506
507 case TLBIE_R_AP_2M:
508 pgoffs_mask = 0x1fffffull;
509 break;
510
511 case TLBIE_R_AP_1G:
512 pgoffs_mask = 0x3fffffffull;
513 break;
514
515 default:
516 /*
517 * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
518 * RB 44:51, or RB 56:63, when it is needed to perform the specified
519 * operation, is not supported by the implementation, the instruction
520 * is treated as if the instruction form were invalid.
521 */
522 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
523 goto invalid;
524 }
525
526 addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;
527
528 if (local) {
529 tlb_flush_page(env_cpu(env), addr);
530 } else {
531 tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
532 }
533 return;
534
535 inval_all:
536 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
537 if (!local) {
538 env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
539 }
540 return;
541
542 invalid:
543 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
544 POWERPC_EXCP_INVAL |
545 POWERPC_EXCP_INVAL_INVAL, GETPC());
546 }
547
548 #endif
549
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
551 {
552 /* tlbiva instruction only exists on BookE */
553 assert(env->mmu_model == POWERPC_MMU_BOOKE);
554 /* XXX: TODO */
555 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
556 }
557
558 /* Software driven TLBs management */
559 /* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
561 {
562 target_ulong RPN, CMP, EPN;
563 int way;
564
565 RPN = env->spr[SPR_RPA];
566 if (is_code) {
567 CMP = env->spr[SPR_ICMP];
568 EPN = env->spr[SPR_IMISS];
569 } else {
570 CMP = env->spr[SPR_DCMP];
571 EPN = env->spr[SPR_DMISS];
572 }
573 way = (env->spr[SPR_SRR1] >> 17) & 1;
574 (void)EPN; /* avoid a compiler warning */
575 qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
576 " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
577 __func__, new_EPN, EPN, CMP, RPN, way);
578 /* Store this TLB */
579 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
580 way, is_code, CMP, RPN);
581 }
582
void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
584 {
585 do_6xx_tlb(env, EPN, 0);
586 }
587
void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
589 {
590 do_6xx_tlb(env, EPN, 1);
591 }
592
static inline target_ulong booke_tlb_to_page_size(int size)
594 {
595 return 1024 << (2 * size);
596 }
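/*
 * The BookE TSIZE encoding gives page size = 1 KB << (2 * TSIZE),
 * i.e. 4^TSIZE KB: TSIZE = 5 is 1 MB, TSIZE = 9 is 256 MB.
 * booke_page_size_to_tlb() below is the inverse mapping.
 */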
597
static inline int booke_page_size_to_tlb(target_ulong page_size)
599 {
600 int size;
601
602 switch (page_size) {
603 case 0x00000400UL:
604 size = 0x0;
605 break;
606 case 0x00001000UL:
607 size = 0x1;
608 break;
609 case 0x00004000UL:
610 size = 0x2;
611 break;
612 case 0x00010000UL:
613 size = 0x3;
614 break;
615 case 0x00040000UL:
616 size = 0x4;
617 break;
618 case 0x00100000UL:
619 size = 0x5;
620 break;
621 case 0x00400000UL:
622 size = 0x6;
623 break;
624 case 0x01000000UL:
625 size = 0x7;
626 break;
627 case 0x04000000UL:
628 size = 0x8;
629 break;
630 case 0x10000000UL:
631 size = 0x9;
632 break;
633 case 0x40000000UL:
634 size = 0xA;
635 break;
636 #if defined(TARGET_PPC64)
637 case 0x000100000000ULL:
638 size = 0xB;
639 break;
640 case 0x000400000000ULL:
641 size = 0xC;
642 break;
643 case 0x001000000000ULL:
644 size = 0xD;
645 break;
646 case 0x004000000000ULL:
647 size = 0xE;
648 break;
649 case 0x010000000000ULL:
650 size = 0xF;
651 break;
652 #endif
653 default:
654 size = -1;
655 break;
656 }
657
658 return size;
659 }
660
661 /* Helpers for 4xx TLB management */
662 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
663
664 #define PPC4XX_TLBHI_V 0x00000040
665 #define PPC4XX_TLBHI_E 0x00000020
666 #define PPC4XX_TLBHI_SIZE_MIN 0
667 #define PPC4XX_TLBHI_SIZE_MAX 7
668 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
669 #define PPC4XX_TLBHI_SIZE_SHIFT 7
670 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
671
672 #define PPC4XX_TLBLO_EX 0x00000200
673 #define PPC4XX_TLBLO_WR 0x00000100
674 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
675 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
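/*
 * Illustrative decode of the masks above: a TLBHI word of 0x000001C0 has
 * V set (0x40) and a SIZE field of 3, which booke_tlb_to_page_size()
 * turns into a 64 KB page; the RPN and access attributes come from the
 * matching TLBLO word.
 */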
676
void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
678 {
679 if (env->spr[SPR_40x_PID] != val) {
680 env->spr[SPR_40x_PID] = val;
681 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
682 }
683 }
684
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
686 {
687 ppcemb_tlb_t *tlb;
688 target_ulong ret;
689 int size;
690
691 entry &= PPC4XX_TLB_ENTRY_MASK;
692 tlb = &env->tlb.tlbe[entry];
693 ret = tlb->EPN;
694 if (tlb->prot & PAGE_VALID) {
695 ret |= PPC4XX_TLBHI_V;
696 }
697 size = booke_page_size_to_tlb(tlb->size);
698 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
699 size = PPC4XX_TLBHI_SIZE_DEFAULT;
700 }
701 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
702 helper_store_40x_pid(env, tlb->PID);
703 return ret;
704 }
705
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
707 {
708 ppcemb_tlb_t *tlb;
709 target_ulong ret;
710
711 entry &= PPC4XX_TLB_ENTRY_MASK;
712 tlb = &env->tlb.tlbe[entry];
713 ret = tlb->RPN;
714 if (tlb->prot & PAGE_EXEC) {
715 ret |= PPC4XX_TLBLO_EX;
716 }
717 if (tlb->prot & PAGE_WRITE) {
718 ret |= PPC4XX_TLBLO_WR;
719 }
720 return ret;
721 }
722
static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
724 {
725 unsigned mmu_idx = 0;
726
727 if (tlb->prot & 0xf) {
728 mmu_idx |= 0x1;
729 }
730 if ((tlb->prot >> 4) & 0xf) {
731 mmu_idx |= 0x2;
732 }
733 if (tlb->attr & 1) {
734 mmu_idx <<= 2;
735 }
736
737 tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
738 TARGET_LONG_BITS);
739 }
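/*
 * ppcemb_tlb_flush() narrows the flush to the QEMU MMU indexes this entry
 * could have populated: one bit per privilege-level nibble of tlb->prot,
 * shifted up by two when attr bit 0 is set.  This is assumed to mirror
 * the mmu_idx layout used by the embedded-MMU translation code in
 * mmu-booke.c.
 */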
740
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
743 {
744 CPUState *cs = env_cpu(env);
745 ppcemb_tlb_t *tlb;
746
747 qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
748 __func__, (int)entry,
749 val);
750 entry &= PPC4XX_TLB_ENTRY_MASK;
751 tlb = &env->tlb.tlbe[entry];
752 /* Invalidate previous TLB (if it's valid) */
753 if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
754 qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
755 TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
756 (int)entry, tlb->EPN, tlb->EPN + tlb->size);
757 ppcemb_tlb_flush(cs, tlb);
758 }
759 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
760 & PPC4XX_TLBHI_SIZE_MASK);
761 /*
762 * We cannot handle TLB size < TARGET_PAGE_SIZE.
763 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
764 */
765 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
766 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
767 "are not supported (%d)\n"
768 "Please implement TARGET_PAGE_BITS_VARY\n",
769 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
770 }
771 tlb->EPN = val & ~(tlb->size - 1);
772 if (val & PPC4XX_TLBHI_V) {
773 tlb->prot |= PAGE_VALID;
774 if (val & PPC4XX_TLBHI_E) {
775 /* XXX: TO BE FIXED */
776 cpu_abort(cs,
777 "Little-endian TLB entries are not supported by now\n");
778 }
779 } else {
780 tlb->prot &= ~PAGE_VALID;
781 }
782 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
783 qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
784 " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
785 " prot %c%c%c%c PID %d\n", __func__,
786 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
787 tlb->prot & PAGE_READ ? 'r' : '-',
788 tlb->prot & PAGE_WRITE ? 'w' : '-',
789 tlb->prot & PAGE_EXEC ? 'x' : '-',
790 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
791 }
792
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
795 {
796 CPUState *cs = env_cpu(env);
797 ppcemb_tlb_t *tlb;
798
799 qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
800 __func__, (int)entry, val);
801 entry &= PPC4XX_TLB_ENTRY_MASK;
802 tlb = &env->tlb.tlbe[entry];
803 /* Invalidate previous TLB (if it's valid) */
804 if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
805 qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
806 TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
807 (int)entry, tlb->EPN, tlb->EPN + tlb->size);
808 ppcemb_tlb_flush(cs, tlb);
809 }
810 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
811 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
812 tlb->prot = PAGE_READ;
813 if (val & PPC4XX_TLBLO_EX) {
814 tlb->prot |= PAGE_EXEC;
815 }
816 if (val & PPC4XX_TLBLO_WR) {
817 tlb->prot |= PAGE_WRITE;
818 }
819 qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
820 " EPN " TARGET_FMT_lx
821 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
822 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
823 tlb->prot & PAGE_READ ? 'r' : '-',
824 tlb->prot & PAGE_WRITE ? 'w' : '-',
825 tlb->prot & PAGE_EXEC ? 'x' : '-',
826 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
827 }
828
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
830 {
831 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
832 }
833
static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
835 {
836 if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
837 return true;
838 }
839 if (!env->nb_pids) {
840 return false;
841 }
842
843 if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
844 return true;
845 }
846 if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
847 return true;
848 }
849
850 return false;
851 }
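/*
 * BookE cores may implement up to three PID registers (PID0/PID1/PID2);
 * an entry matches when its TID equals any of them.  The nb_pids check
 * keeps single-PID implementations out of the extra comparisons, and a
 * PID1/PID2 value of 0 is treated as unused here.
 */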
852
853 /* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
856 {
857 ppcemb_tlb_t *tlb;
858
859 qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
860 __func__, word, (int)entry, value);
861 entry &= 0x3F;
862 tlb = &env->tlb.tlbe[entry];
863
864 /* Invalidate previous TLB (if it's valid) */
865 if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
866 qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
867 TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
868 (int)entry, tlb->EPN, tlb->EPN + tlb->size);
869 ppcemb_tlb_flush(env_cpu(env), tlb);
870 }
871
872 switch (word) {
873 default:
874 /* Just here to please gcc */
875 case 0:
876 tlb->EPN = value & 0xFFFFFC00;
877 tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
878 tlb->attr &= ~0x1;
879 tlb->attr |= (value >> 8) & 1;
880 if (value & 0x200) {
881 tlb->prot |= PAGE_VALID;
882 } else {
883 tlb->prot &= ~PAGE_VALID;
884 }
885 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
886 break;
887 case 1:
888 tlb->RPN = value & 0xFFFFFC0F;
889 break;
890 case 2:
891 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
892 tlb->prot = tlb->prot & PAGE_VALID;
893 if (value & 0x1) {
894 tlb->prot |= PAGE_READ << 4;
895 }
896 if (value & 0x2) {
897 tlb->prot |= PAGE_WRITE << 4;
898 }
899 if (value & 0x4) {
900 tlb->prot |= PAGE_EXEC << 4;
901 }
902 if (value & 0x8) {
903 tlb->prot |= PAGE_READ;
904 }
905 if (value & 0x10) {
906 tlb->prot |= PAGE_WRITE;
907 }
908 if (value & 0x20) {
909 tlb->prot |= PAGE_EXEC;
910 }
911 break;
912 }
913 }
914
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
917 {
918 ppcemb_tlb_t *tlb;
919 target_ulong ret;
920 int size;
921
922 entry &= 0x3F;
923 tlb = &env->tlb.tlbe[entry];
924 switch (word) {
925 default:
926 /* Just here to please gcc */
927 case 0:
928 ret = tlb->EPN;
929 size = booke_page_size_to_tlb(tlb->size);
930 if (size < 0 || size > 0xF) {
931 size = 1;
932 }
933 ret |= size << 4;
934 if (tlb->attr & 0x1) {
935 ret |= 0x100;
936 }
937 if (tlb->prot & PAGE_VALID) {
938 ret |= 0x200;
939 }
940 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
941 env->spr[SPR_440_MMUCR] |= tlb->PID;
942 break;
943 case 1:
944 ret = tlb->RPN;
945 break;
946 case 2:
947 ret = tlb->attr & ~0x1;
948 if (tlb->prot & (PAGE_READ << 4)) {
949 ret |= 0x1;
950 }
951 if (tlb->prot & (PAGE_WRITE << 4)) {
952 ret |= 0x2;
953 }
954 if (tlb->prot & (PAGE_EXEC << 4)) {
955 ret |= 0x4;
956 }
957 if (tlb->prot & PAGE_READ) {
958 ret |= 0x8;
959 }
960 if (tlb->prot & PAGE_WRITE) {
961 ret |= 0x10;
962 }
963 if (tlb->prot & PAGE_EXEC) {
964 ret |= 0x20;
965 }
966 break;
967 }
968 return ret;
969 }
970
target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
972 {
973 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
974 }
975
976 /* PowerPC BookE 2.06 TLB management */
977
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
979 {
980 uint32_t tlbncfg = 0;
981 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
982 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
983 int tlb;
984
985 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
986 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
987
988 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
989 cpu_abort(env_cpu(env), "we don't support HES yet\n");
990 }
991
992 return booke206_get_tlbm(env, tlb, ea, esel);
993 }
994
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
996 {
997 env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
999 tlb_flush(env_cpu(env));
1000 }
1001
void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
1003 {
1004 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
1005 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
1006 }
void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
1008 {
1009 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
1010 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
1011 }
1012
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
1014 {
1015 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
1016 tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
1017 } else {
1018 tlb_flush(env_cpu(env));
1019 }
1020 }
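/*
 * helper_booke206_tlbwe() below implements the MAS-driven tlbwe flow:
 * MAS0 selects the TLB array and entry, MAS1 carries the valid bit, TID
 * and TSIZE, MAS2 the effective page number plus WIMGE attributes, and
 * MAS3/MAS7 the real page number and permission bits.  Only what TCG
 * emulation needs is checked here; HES and direct LRAT writes are not
 * supported (see the cpu_abort()/fprintf() cases).
 */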
1021
void helper_booke206_tlbwe(CPUPPCState *env)
1023 {
1024 uint32_t tlbncfg, tlbn;
1025 ppcmas_tlb_t *tlb;
1026 uint32_t size_tlb, size_ps;
1027 target_ulong mask;
1028
1029
1030 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
1031 case MAS0_WQ_ALWAYS:
1032 /* good to go, write that entry */
1033 break;
1034 case MAS0_WQ_COND:
1035 /* XXX check if reserved */
1036 if (0) {
1037 return;
1038 }
1039 break;
1040 case MAS0_WQ_CLR_RSRV:
1041 /* XXX clear entry */
1042 return;
1043 default:
1044 /* no idea what to do */
1045 return;
1046 }
1047
1048 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
1049 !FIELD_EX64(env->msr, MSR, GS)) {
1050 /* XXX we don't support direct LRAT setting yet */
1051 fprintf(stderr, "cpu: don't support LRAT setting yet\n");
1052 return;
1053 }
1054
1055 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
1056 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
1057
1058 tlb = booke206_cur_tlb(env);
1059
1060 if (!tlb) {
1061 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1062 POWERPC_EXCP_INVAL |
1063 POWERPC_EXCP_INVAL_INVAL, GETPC());
1064 }
1065
1066 /* check that we support the targeted size */
1067 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1068 size_ps = booke206_tlbnps(env, tlbn);
1069 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
1070 !(size_ps & (1 << size_tlb))) {
1071 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1072 POWERPC_EXCP_INVAL |
1073 POWERPC_EXCP_INVAL_INVAL, GETPC());
1074 }
1075
1076 if (FIELD_EX64(env->msr, MSR, GS)) {
1077 cpu_abort(env_cpu(env), "missing HV implementation\n");
1078 }
1079
1080 if (tlb->mas1 & MAS1_VALID) {
1081 /*
1082 * Invalidate the page in QEMU TLB if it was a valid entry.
1083 *
1084 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
1085 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
1086 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
1087 *
1088 * "Note that when an L2 TLB entry is written, it may be displacing an
1089 * already valid entry in the same L2 TLB location (a victim). If a
1090 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
1091 * TLB entry is automatically invalidated."
1092 */
1093 flush_page(env, tlb);
1094 }
1095
1096 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
1097 env->spr[SPR_BOOKE_MAS3];
1098 tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
1099
1100 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For TLBs which have a fixed size, TSIZE is ignored with MAV2 */
1102 booke206_fixed_size_tlbn(env, tlbn, tlb);
1103 } else {
1104 if (!(tlbncfg & TLBnCFG_AVAIL)) {
1105 /* force !AVAIL TLB entries to correct page size */
1106 tlb->mas1 &= ~MAS1_TSIZE_MASK;
1107 /* XXX can be configured in MMUCSR0 */
1108 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
1109 }
1110 }
1111
1112 /* Make a mask from TLB size to discard invalid bits in EPN field */
1113 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
1114 /* Add a mask for page attributes */
1115 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
1116
1117 if (!FIELD_EX64(env->msr, MSR, CM)) {
1118 /*
1119 * Executing a tlbwe instruction in 32-bit mode will set bits
1120 * 0:31 of the TLB EPN field to zero.
1121 */
1122 mask &= 0xffffffff;
1123 }
1124
1125 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
1126
1127 if (!(tlbncfg & TLBnCFG_IPROT)) {
1128 /* no IPROT supported by TLB */
1129 tlb->mas1 &= ~MAS1_IPROT;
1130 }
1131
1132 flush_page(env, tlb);
1133 }
1134
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
1136 {
1137 int tlbn = booke206_tlbm_to_tlbn(env, tlb);
1138 int way = booke206_tlbm_to_way(env, tlb);
1139
1140 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
1141 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
1142 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1143
1144 env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
1145 env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
1146 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
1147 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
1148 }
1149
void helper_booke206_tlbre(CPUPPCState *env)
1151 {
1152 ppcmas_tlb_t *tlb = NULL;
1153
1154 tlb = booke206_cur_tlb(env);
1155 if (!tlb) {
1156 env->spr[SPR_BOOKE_MAS1] = 0;
1157 } else {
1158 booke206_tlb_to_mas(env, tlb);
1159 }
1160 }
1161
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
1163 {
1164 ppcmas_tlb_t *tlb = NULL;
1165 int i, j;
1166 hwaddr raddr;
1167 uint32_t spid, sas;
1168
1169 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
1170 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
1171
1172 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1173 int ways = booke206_tlb_ways(env, i);
1174
1175 for (j = 0; j < ways; j++) {
1176 tlb = booke206_get_tlbm(env, i, address, j);
1177
1178 if (!tlb) {
1179 continue;
1180 }
1181
1182 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
1183 continue;
1184 }
1185
1186 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1187 continue;
1188 }
1189
1190 booke206_tlb_to_mas(env, tlb);
1191 return;
1192 }
1193 }
1194
1195 /* no entry found, fill with defaults */
1196 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1197 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1198 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1199 env->spr[SPR_BOOKE_MAS3] = 0;
1200 env->spr[SPR_BOOKE_MAS7] = 0;
1201
1202 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
1203 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1204 }
1205
1206 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
1207 << MAS1_TID_SHIFT;
1208
1209 /* next victim logic */
1210 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1211 env->last_way++;
1212 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1213 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1214 }
1215
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
1218 {
1219 int i;
1220 int ways = booke206_tlb_ways(env, tlbn);
1221 target_ulong mask;
1222
1223 for (i = 0; i < ways; i++) {
1224 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
1225 if (!tlb) {
1226 continue;
1227 }
1228 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
1229 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
1230 !(tlb->mas1 & MAS1_IPROT)) {
1231 tlb->mas1 &= ~MAS1_VALID;
1232 }
1233 }
1234 }
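/*
 * tlbivax decode handled below: EA bit 0x8 selects TLB1 rather than TLB0
 * and EA bit 0x4 requests an "invalidate all" of the selected array;
 * otherwise the EA gives the page to invalidate.  Since TLB1 may contain
 * large pages, its targeted invalidation is broadcast as a full flush on
 * every CPU here.
 */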
1235
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
1237 {
1238 CPUState *cs;
1239
1240 if (address & 0x4) {
1241 /* flush all entries */
1242 if (address & 0x8) {
1243 /* flush all of TLB1 */
1244 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
1245 } else {
1246 /* flush all of TLB0 */
1247 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
1248 }
1249 return;
1250 }
1251
1252 if (address & 0x8) {
1253 /* flush TLB1 entries */
1254 booke206_invalidate_ea_tlb(env, 1, address);
1255 CPU_FOREACH(cs) {
1256 tlb_flush(cs);
1257 }
1258 } else {
1259 /* flush TLB0 entries */
1260 booke206_invalidate_ea_tlb(env, 0, address);
1261 CPU_FOREACH(cs) {
1262 tlb_flush_page(cs, address & MAS2_EPN_MASK);
1263 }
1264 }
1265 }
1266
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
1268 {
1269 /* XXX missing LPID handling */
1270 booke206_flush_tlb(env, -1, 1);
1271 }
1272
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
1274 {
1275 int i, j;
1276 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
1277 ppcmas_tlb_t *tlb = env->tlb.tlbm;
1278 int tlb_size;
1279
1280 /* XXX missing LPID handling */
1281 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1282 tlb_size = booke206_tlb_size(env, i);
1283 for (j = 0; j < tlb_size; j++) {
1284 if (!(tlb[j].mas1 & MAS1_IPROT) &&
1285 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
1286 tlb[j].mas1 &= ~MAS1_VALID;
1287 }
1288 }
1289 tlb += booke206_tlb_size(env, i);
1290 }
1291 tlb_flush(env_cpu(env));
1292 }
1293
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
1295 {
1296 int i, j;
1297 ppcmas_tlb_t *tlb;
1298 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
1299 int pid = tid >> MAS6_SPID_SHIFT;
1300 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
1301 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
1302 /* XXX check for unsupported isize and raise an invalid opcode then */
1303 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
1304 /* XXX implement MAV2 handling */
1305 bool mav2 = false;
1306
1307 /* XXX missing LPID handling */
1308 /* flush by pid and ea */
1309 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1310 int ways = booke206_tlb_ways(env, i);
1311
1312 for (j = 0; j < ways; j++) {
1313 tlb = booke206_get_tlbm(env, i, address, j);
1314 if (!tlb) {
1315 continue;
1316 }
1317 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
1318 (tlb->mas1 & MAS1_IPROT) ||
1319 ((tlb->mas1 & MAS1_IND) != ind) ||
1320 ((tlb->mas8 & MAS8_TGS) != sgs)) {
1321 continue;
1322 }
1323 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
1324 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
1325 continue;
1326 }
1327 /* XXX e500mc doesn't match SAS, but other cores might */
1328 tlb->mas1 &= ~MAS1_VALID;
1329 }
1330 }
1331 tlb_flush(env_cpu(env));
1332 }
1333
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
1335 {
1336 int flags = 0;
1337
1338 if (type & 2) {
1339 flags |= BOOKE206_FLUSH_TLB1;
1340 }
1341
1342 if (type & 4) {
1343 flags |= BOOKE206_FLUSH_TLB0;
1344 }
1345
1346 booke206_flush_tlb(env, flags, 1);
1347 }
1348
1349
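/*
 * Several helpers above only record a pending invalidation in
 * env->tlb_need_flush.  These two wrappers let generated code call
 * check_tlb_flush() so that the pending local or global flush is actually
 * carried out, presumably at context-synchronizing points such as
 * sync/ptesync.
 */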
void helper_check_tlb_flush_local(CPUPPCState *env)
1351 {
1352 check_tlb_flush(env, false);
1353 }
1354
void helper_check_tlb_flush_global(CPUPPCState *env)
1356 {
1357 check_tlb_flush(env, true);
1358 }
1359
1360
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
1364 {
1365 PowerPCCPU *cpu = POWERPC_CPU(cs);
1366 hwaddr raddr;
1367 int page_size, prot;
1368
1369 if (ppc_xlate(cpu, eaddr, access_type, &raddr,
1370 &page_size, &prot, mmu_idx, !probe)) {
1371 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
1372 prot, mmu_idx, 1UL << page_size);
1373 return true;
1374 }
1375 if (probe) {
1376 return false;
1377 }
1378 raise_exception_err_ra(&cpu->env, cs->exception_index,
1379 cpu->env.error_code, retaddr);
1380 }
1381