xref: /qemu/target/ppc/mmu-hash64.c (revision cff1ec67509cacc2ea30d19ba61fa0ab0772e119)
1 /*
2  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (c) 2013 David Gibson, IBM Corporation
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "qemu/error-report.h"
26 #include "qemu/qemu-print.h"
27 #include "system/hw_accel.h"
28 #include "system/memory.h"
29 #include "kvm_ppc.h"
30 #include "mmu-hash64.h"
31 #include "exec/log.h"
32 #include "hw/hw.h"
33 #include "internal.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-books.h"
36 #include "helper_regs.h"
37 
38 #ifdef CONFIG_TCG
39 #include "exec/helper-proto.h"
40 #endif
41 
42 /* #define DEBUG_SLB */
43 
44 #ifdef DEBUG_SLB
45 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
46 #else
47 #  define LOG_SLB(...) do { } while (0)
48 #endif
49 
50 /*
51  * SLB handling
52  */
53 
54 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
55 {
56     CPUPPCState *env = &cpu->env;
57     uint64_t esid_256M, esid_1T;
58     int n;
59 
60     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
61 
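    /*
     * ORing SLB_ESID_V into the masked effective address lets a single
     * 64-bit compare below check both that the ESID matches and that the
     * SLB entry is valid.
     */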
62     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
63     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
64 
65     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
66         ppc_slb_t *slb = &env->slb[n];
67 
68         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
69                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
70         /*
71          * We check for 1T matches on all MMUs here - if the MMU
72          * doesn't have 1T segment support, we will have prevented 1T
73          * entries from being inserted in the slbmte code.
74          */
75         if (((slb->esid == esid_256M) &&
76              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
77             || ((slb->esid == esid_1T) &&
78                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
79             return slb;
80         }
81     }
82 
83     return NULL;
84 }
85 
86 void dump_slb(PowerPCCPU *cpu)
87 {
88     CPUPPCState *env = &cpu->env;
89     int i;
90     uint64_t slbe, slbv;
91 
92     cpu_synchronize_state(CPU(cpu));
93 
94     qemu_printf("SLB\tESID\t\t\tVSID\n");
95     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
96         slbe = env->slb[i].esid;
97         slbv = env->slb[i].vsid;
98         if (slbe == 0 && slbv == 0) {
99             continue;
100         }
101         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
102                     i, slbe, slbv);
103     }
104 }
105 
106 #ifdef CONFIG_TCG
107 void helper_SLBIA(CPUPPCState *env, uint32_t ih)
108 {
109     PowerPCCPU *cpu = env_archcpu(env);
110     int starting_entry;
111     int n;
112 
113     /*
114      * slbia must always flush the whole TLB (equivalent to the ERAT in the ppc
115      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
116      * can overwrite a valid SLB without flushing its lookaside information.
117      *
118      * It would be possible to keep the TLB in sync with the SLB by flushing
119      * when a valid entry is overwritten by slbmte, and therefore slbia would
120      * not have to flush unless it evicts a valid SLB entry. However it is
121      * expected that slbmte is more common than slbia, and slbia is usually
122      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
123      * good one.
124      *
125      * ISA v2.05 introduced the IH field with values 0,1,2,6. These all
126      * invalidate the same SLB entries (everything but entry 0), but differ in
127      * what "lookaside information" is invalidated. TCG can ignore this and
128      * flush everything.
129      *
130      * ISA v3.0 introduced the additional values 3,4,7, which change which SLB
131      * entries are invalidated.
132      */
133 
134     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
135 
136     starting_entry = 1; /* default for IH=0,1,2,6 */
137 
138     if (env->mmu_model == POWERPC_MMU_3_00) {
139         switch (ih) {
140         case 0x7:
141             /* invalidate no SLBs, but all lookaside information */
142             return;
143 
144         case 0x3:
145         case 0x4:
146             /* also considers SLB entry 0 */
147             starting_entry = 0;
148             break;
149 
150         case 0x5:
151             /* treat undefined values as ih==0, and warn */
152             qemu_log_mask(LOG_GUEST_ERROR,
153                           "slbia undefined IH field %u.\n", ih);
154             break;
155 
156         default:
157             /* 0,1,2,6 */
158             break;
159         }
160     }
161 
162     for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
163         ppc_slb_t *slb = &env->slb[n];
164 
165         if (!(slb->esid & SLB_ESID_V)) {
166             continue;
167         }
168         if (env->mmu_model == POWERPC_MMU_3_00) {
169             if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
170                 /* preserves entries with a class value of 0 */
171                 continue;
172             }
173         }
174 
175         slb->esid &= ~SLB_ESID_V;
176     }
177 }
178 
179 #if defined(TARGET_PPC64)
180 void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
181 {
182     PowerPCCPU *cpu = env_archcpu(env);
183     int n;
184 
185     /*
186      * slbiag must always flush the whole TLB (equivalent to the ERAT in the ppc
187      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
188      * can overwrite a valid SLB without flushing its lookaside information.
189      *
190      * It would be possible to keep the TLB in sync with the SLB by flushing
191      * when a valid entry is overwritten by slbmte, and therefore slbiag would
192      * not have to flush unless it evicts a valid SLB entry. However it is
193      * expected that slbmte is more common than slbiag, and slbiag is usually
194      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
195      * good one.
196      */
197     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
198 
199     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
200         ppc_slb_t *slb = &env->slb[n];
201         slb->esid &= ~SLB_ESID_V;
202     }
203 }
204 #endif
205 
206 static void __helper_slbie(CPUPPCState *env, target_ulong addr,
207                            target_ulong global)
208 {
209     PowerPCCPU *cpu = env_archcpu(env);
210     ppc_slb_t *slb;
211 
212     slb = slb_lookup(cpu, addr);
213     if (!slb) {
214         return;
215     }
216 
217     if (slb->esid & SLB_ESID_V) {
218         slb->esid &= ~SLB_ESID_V;
219 
220         /*
221          * XXX: given the fact that segment size is 256 MB or 1TB,
222          *      and we still don't have a tlb_flush_mask(env, n, mask)
223          *      in QEMU, we just invalidate all TLBs
224          */
225         env->tlb_need_flush |=
226             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
227     }
228 }
229 
230 void helper_SLBIE(CPUPPCState *env, target_ulong addr)
231 {
232     __helper_slbie(env, addr, false);
233 }
234 
235 void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
236 {
237     __helper_slbie(env, addr, true);
238 }
239 #endif
240 
241 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
242                   target_ulong esid, target_ulong vsid)
243 {
244     CPUPPCState *env = &cpu->env;
245     ppc_slb_t *slb = &env->slb[slot];
246     const PPCHash64SegmentPageSizes *sps = NULL;
247     int i;
248 
249     if (slot >= cpu->hash64_opts->slb_size) {
250         return -1; /* Bad slot number */
251     }
252     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
253         return -1; /* Reserved bits set */
254     }
255     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
256         return -1; /* Bad segment size */
257     }
258     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
259         return -1; /* 1T segment on MMU that doesn't support it */
260     }
261 
262     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
263         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
264 
265         if (!sps1->page_shift) {
266             break;
267         }
268 
269         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
270             sps = sps1;
271             break;
272         }
273     }
274 
275     if (!sps) {
276         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
277                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
278                      slot, esid, vsid);
279         return -1;
280     }
281 
282     slb->esid = esid;
283     slb->vsid = vsid;
284     slb->sps = sps;
285 
286     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
287             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
288             slb->esid, slb->vsid);
289 
290     return 0;
291 }
292 
293 #ifdef CONFIG_TCG
294 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
295                              target_ulong *rt)
296 {
297     CPUPPCState *env = &cpu->env;
298     int slot = rb & 0xfff;
299     ppc_slb_t *slb = &env->slb[slot];
300 
301     if (slot >= cpu->hash64_opts->slb_size) {
302         return -1;
303     }
304 
305     *rt = slb->esid;
306     return 0;
307 }
308 
309 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
310                              target_ulong *rt)
311 {
312     CPUPPCState *env = &cpu->env;
313     int slot = rb & 0xfff;
314     ppc_slb_t *slb = &env->slb[slot];
315 
316     if (slot >= cpu->hash64_opts->slb_size) {
317         return -1;
318     }
319 
320     *rt = slb->vsid;
321     return 0;
322 }
323 
324 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
325                              target_ulong *rt)
326 {
327     CPUPPCState *env = &cpu->env;
328     ppc_slb_t *slb;
329 
330     if (!msr_is_64bit(env, env->msr)) {
331         rb &= 0xffffffff;
332     }
333     slb = slb_lookup(cpu, rb);
334     if (slb == NULL) {
335         *rt = (target_ulong)-1ul;
336     } else {
337         *rt = slb->vsid;
338     }
339     return 0;
340 }
341 
342 void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
343 {
344     PowerPCCPU *cpu = env_archcpu(env);
345 
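    /*
     * rb carries the new ESID (and the V bit) in its upper bits and the SLB
     * slot index in its low 12 bits; rs supplies the VSID half of the entry.
     */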
346     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
347         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
348                                POWERPC_EXCP_INVAL, GETPC());
349     }
350 }
351 
352 target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
353 {
354     PowerPCCPU *cpu = env_archcpu(env);
355     target_ulong rt = 0;
356 
357     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
358         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
359                                POWERPC_EXCP_INVAL, GETPC());
360     }
361     return rt;
362 }
363 
364 target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
365 {
366     PowerPCCPU *cpu = env_archcpu(env);
367     target_ulong rt = 0;
368 
369     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
370         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
371                                POWERPC_EXCP_INVAL, GETPC());
372     }
373     return rt;
374 }
375 
376 target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
377 {
378     PowerPCCPU *cpu = env_archcpu(env);
379     target_ulong rt = 0;
380 
381     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
382         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
383                                POWERPC_EXCP_INVAL, GETPC());
384     }
385     return rt;
386 }
387 #endif
388 
389 /* Check No-Execute or Guarded Storage */
390 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
391                                               ppc_hash_pte64_t pte)
392 {
393     /* Exec permissions CANNOT take away read or write permissions */
394     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
395             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
396 }
397 
398 /* Check Basic Storage Protection */
399 static int ppc_hash64_pte_prot(int mmu_idx,
400                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
401 {
402     unsigned pp, key;
403     /*
404      * Some pp bit combinations have undefined behaviour, so default
405      * to no access in those cases
406      */
407     int prot = 0;
408 
409     key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
410              : (slb->vsid & SLB_VSID_KS));
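    /*
     * The shift below folds PP0 in as bit 2 of pp, so pp is the 3-bit
     * PP0||PP value; only 0-3 and 6 have defined semantics here, hence
     * the default of no access for other combinations.
     */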
411     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
412 
413     if (key == 0) {
414         switch (pp) {
415         case 0x0:
416         case 0x1:
417         case 0x2:
418             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
419             break;
420 
421         case 0x3:
422         case 0x6:
423             prot = PAGE_READ | PAGE_EXEC;
424             break;
425         }
426     } else {
427         switch (pp) {
428         case 0x0:
429         case 0x6:
430             break;
431 
432         case 0x1:
433         case 0x3:
434             prot = PAGE_READ | PAGE_EXEC;
435             break;
436 
437         case 0x2:
438             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
439             break;
440         }
441     }
442 
443     return prot;
444 }
445 
446 /* Check the instruction access permissions specified in the IAMR */
447 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
448 {
449     CPUPPCState *env = &cpu->env;
450     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
451 
452     /*
453      * An instruction fetch is permitted if the IAMR bit is 0.
454      * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
455      * can only take away EXEC permissions, not READ or WRITE permissions.
456      * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
457      * EXEC permissions are allowed.
458      */
459     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
460                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
461 }
462 
463 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
464 {
465     CPUPPCState *env = &cpu->env;
466     int key, amrbits;
467     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
468 
469     /* Only recent MMUs implement Virtual Page Class Key Protection */
470     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
471         return prot;
472     }
473 
474     key = HPTE64_R_KEY(pte.pte1);
475     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
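    /*
     * The AMR holds 32 two-bit key fields, with key 0 in the most
     * significant pair, hence the shift by 2 * (31 - key).
     */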
476 
477     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
478     /*         env->spr[SPR_AMR]); */
479 
480     /*
481      * A store is permitted if the AMR bit is 0. Remove write
482      * protection if it is set.
483      */
484     if (amrbits & 0x2) {
485         prot &= ~PAGE_WRITE;
486     }
487     /*
488      * A load is permitted if the AMR bit is 0. Remove read
489      * protection if it is set.
490      */
491     if (amrbits & 0x1) {
492         prot &= ~PAGE_READ;
493     }
494 
495     switch (env->mmu_model) {
496     /*
497      * MMU versions 2.07 and later support the IAMR. Check whether the IAMR
498      * allows the instruction access: ppc_hash64_iamr_prot() omits PAGE_EXEC
499      * from its result if it does not (so the AND below clears that bit) and
500      * keeps it if it does (leaving prot unchanged, i.e. execution allowed).
501      */
502     case POWERPC_MMU_2_07:
503     case POWERPC_MMU_3_00:
504         prot &= ppc_hash64_iamr_prot(cpu, key);
505         break;
506     default:
507         break;
508     }
509 
510     return prot;
511 }
512 
513 static hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
514 {
515     uint64_t base;
516 
517     if (cpu->vhyp) {
518         return 0;
519     }
520     if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
521         ppc_v3_pate_t pate;
522 
523         if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
524             return 0;
525         }
526         base = pate.dw0;
527     } else {
528         base = cpu->env.spr[SPR_SDR1];
529     }
530     return base & SDR_64_HTABORG;
531 }
532 
533 static hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
534 {
535     uint64_t base;
536 
537     if (cpu->vhyp) {
538         return cpu->vhyp_class->hpt_mask(cpu->vhyp);
539     }
540     if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
541         ppc_v3_pate_t pate;
542 
543         if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
544             return 0;
545         }
546         base = pate.dw0;
547     } else {
548         base = cpu->env.spr[SPR_SDR1];
549     }
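    /*
     * HTABSIZE encodes the HPT size as 2^(18 + HTABSIZE) bytes; dividing by
     * the 128-byte PTEG size (the "- 7") gives the number of PTEGs, and the
     * mask is that count minus one.
     */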
550     return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
551 }
552 
553 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
554                                              hwaddr ptex, int n)
555 {
556     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
557     hwaddr base;
558     hwaddr plen = n * HASH_PTE_SIZE_64;
559     const ppc_hash_pte64_t *hptes;
560 
561     if (cpu->vhyp) {
562         return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n);
563     }
564     base = ppc_hash64_hpt_base(cpu);
565 
566     if (!base) {
567         return NULL;
568     }
569 
570     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
571                               MEMTXATTRS_UNSPECIFIED);
572     if (plen < (n * HASH_PTE_SIZE_64)) {
573         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
574     }
575     return hptes;
576 }
577 
578 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
579                             hwaddr ptex, int n)
580 {
581     if (cpu->vhyp) {
582         cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n);
583         return;
584     }
585 
586     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
587                         false, n * HASH_PTE_SIZE_64);
588 }
589 
590 bool ppc_hash64_valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
591 {
592     /* hash value/pteg group index is normalized by HPT mask */
593     if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
594         return false;
595     }
596     return true;
597 }
598 
599 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
600                                 uint64_t pte0, uint64_t pte1)
601 {
602     int i;
603 
604     if (!(pte0 & HPTE64_V_LARGE)) {
605         if (sps->page_shift != 12) {
606             /* 4kiB page in a non-4kiB segment */
607             return 0;
608         }
609         /* Normal 4kiB page */
610         return 12;
611     }
612 
613     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
614         const PPCHash64PageSize *ps = &sps->enc[i];
615         uint64_t mask;
616 
617         if (!ps->page_shift) {
618             break;
619         }
620 
621         if (ps->page_shift == 12) {
622             /* L bit is set so this can't be a 4kiB page */
623             continue;
624         }
625 
626         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
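        /*
         * For large pages, the low-order bits of the RPN field that would
         * otherwise be zero hold the page size encoding; mask those bits of
         * pte1 and compare them against this candidate's pte_enc.
         */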
627 
628         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
629             return ps->page_shift;
630         }
631     }
632 
633     return 0; /* Bad page size encoding */
634 }
635 
636 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
637 {
638     /* Insert B into pte0 */
639     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
640             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
641              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
642 
643     /* Remove B from pte1 */
644     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
645 }
646 
648 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
649                                      const PPCHash64SegmentPageSizes *sps,
650                                      target_ulong ptem,
651                                      ppc_hash_pte64_t *pte, unsigned *pshift)
652 {
653     int i;
654     const ppc_hash_pte64_t *pteg;
655     target_ulong pte0, pte1;
656     target_ulong ptex;
657 
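    /* The masked hash selects a PTEG; ptex is the index of its first PTE. */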
658     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
659     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
660     if (!pteg) {
661         return -1;
662     }
663     for (i = 0; i < HPTES_PER_GROUP; i++) {
664         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
665         /*
666          * pte0 contains the valid bit and must be read before pte1,
667          * otherwise we might see an old pte1 with a new valid bit and
668          * thus an inconsistent hpte value
669          */
670         smp_rmb();
671         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
672 
673         /* Convert format if necessary */
674         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
675             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
676         }
677 
678         /* This compares V, B, H (secondary) and the AVPN */
679         if (HPTE64_V_COMPARE(pte0, ptem)) {
680             *pshift = hpte_page_shift(sps, pte0, pte1);
681             /*
682              * If there is no match, ignore the PTE, it could simply
683              * be for a different segment size encoding and the
684              * architecture specifies we should not match. Linux will
685              * potentially leave behind PTEs for the wrong base page
686              * size when demoting segments.
687              */
688             if (*pshift == 0) {
689                 continue;
690             }
691             /*
692              * We don't do anything with pshift yet as qemu TLB only
693              * deals with 4K pages anyway
694              */
695             pte->pte0 = pte0;
696             pte->pte1 = pte1;
697             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
698             return ptex + i;
699         }
700     }
701     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
702     /*
703      * We didn't find a valid entry.
704      */
705     return -1;
706 }
707 
708 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
709                                      ppc_slb_t *slb, target_ulong eaddr,
710                                      ppc_hash_pte64_t *pte, unsigned *pshift)
711 {
712     CPUPPCState *env = &cpu->env;
713     hwaddr hash, ptex;
714     uint64_t vsid, epnmask, epn, ptem;
715     const PPCHash64SegmentPageSizes *sps = slb->sps;
716 
717     /*
718      * The SLB store path should prevent any bad page size encodings
719      * getting in there, so:
720      */
721     assert(sps);
722 
723     /* If ISL is set in LPCR we need to clamp the page size to 4K */
724     if (env->spr[SPR_LPCR] & LPCR_ISL) {
725         /* We assume that when using TCG, 4k is first entry of SPS */
726         sps = &cpu->hash64_opts->sps[0];
727         assert(sps->page_shift == 12);
728     }
729 
730     epnmask = ~((1ULL << sps->page_shift) - 1);
731 
732     if (slb->vsid & SLB_VSID_B) {
733         /* 1TB segment */
734         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
735         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
736         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
737     } else {
738         /* 256M segment */
739         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
740         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
741         hash = vsid ^ (epn >> sps->page_shift);
742     }
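    /*
     * Illustrative example (arbitrary values): for a 256M segment with
     * vsid 0x123456 and a 4K base page size, an access at segment offset
     * 0x7654321 has epn 0x7654000, so hash = 0x123456 ^ 0x7654 = 0x124202.
     * The secondary hash tried below on a miss is simply ~hash.
     */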
743     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
744     ptem |= HPTE64_V_VALID;
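    /*
     * ptem is the value that the V, B and AVPN fields (plus H for the
     * secondary search) of a matching HPTE must contain; see
     * HPTE64_V_COMPARE() in ppc_hash64_pteg_search().
     */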
745 
746     /* Page address translation */
747     qemu_log_mask(CPU_LOG_MMU,
748             "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
749             " hash " HWADDR_FMT_plx "\n",
750             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
751 
752     /* Primary PTEG lookup */
753     qemu_log_mask(CPU_LOG_MMU,
754             "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
755             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
756             " hash=" HWADDR_FMT_plx "\n",
757             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
758             vsid, ptem,  hash);
759     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
760 
761     if (ptex == -1) {
762         /* Secondary PTEG lookup */
763         ptem |= HPTE64_V_SECONDARY;
764         qemu_log_mask(CPU_LOG_MMU,
765                 "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
766                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
767                 " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
768                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
769 
770         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
771     }
772 
773     return ptex;
774 }
775 
776 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
777                                           uint64_t pte0, uint64_t pte1)
778 {
779     int i;
780 
781     if (!(pte0 & HPTE64_V_LARGE)) {
782         return 12;
783     }
784 
785     /*
786      * The encodings in env->sps need to be carefully chosen so that
787      * this gives an unambiguous result.
788      */
789     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
790         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
791         unsigned shift;
792 
793         if (!sps->page_shift) {
794             break;
795         }
796 
797         shift = hpte_page_shift(sps, pte0, pte1);
798         if (shift) {
799             return shift;
800         }
801     }
802 
803     return 0;
804 }
805 
806 static bool ppc_hash64_use_vrma(CPUPPCState *env)
807 {
808     switch (env->mmu_model) {
809     case POWERPC_MMU_3_00:
810         /*
811          * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
812          * register no longer exist
813          */
814         return true;
815 
816     default:
817         return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
818     }
819 }
820 
821 static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
822                                uint64_t error_code)
823 {
824     CPUPPCState *env = &POWERPC_CPU(cs)->env;
825     bool vpm;
826 
827     if (!mmuidx_real(mmu_idx)) {
828         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
829     } else {
830         vpm = ppc_hash64_use_vrma(env);
831     }
832     if (vpm && !mmuidx_hv(mmu_idx)) {
833         cs->exception_index = POWERPC_EXCP_HISI;
834         env->spr[SPR_ASDR] = slb_vsid;
835     } else {
836         cs->exception_index = POWERPC_EXCP_ISI;
837     }
838     env->error_code = error_code;
839 }
840 
841 static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
842                                uint64_t dar, uint64_t dsisr)
843 {
844     CPUPPCState *env = &POWERPC_CPU(cs)->env;
845     bool vpm;
846 
847     if (!mmuidx_real(mmu_idx)) {
848         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
849     } else {
850         vpm = ppc_hash64_use_vrma(env);
851     }
852     if (vpm && !mmuidx_hv(mmu_idx)) {
853         cs->exception_index = POWERPC_EXCP_HDSI;
854         env->spr[SPR_HDAR] = dar;
855         env->spr[SPR_HDSISR] = dsisr;
856         env->spr[SPR_ASDR] = slb_vsid;
857     } else {
858         cs->exception_index = POWERPC_EXCP_DSI;
859         env->spr[SPR_DAR] = dar;
860         env->spr[SPR_DSISR] = dsisr;
861     }
862     env->error_code = 0;
863 }
864 
866 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
867 {
868     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
869 
870     if (cpu->vhyp) {
871         cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1);
872         return;
873     }
874     base = ppc_hash64_hpt_base(cpu);
875 
877     /* The HW performs a non-atomic byte update */
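    /* (pte1 >> 8) & 0xff is the byte holding the R bit; OR-ing 0x01 sets R */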
878     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
879 }
880 
881 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
882 {
883     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
884 
885     if (cpu->vhyp) {
886         cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1);
887         return;
888     }
889     base = ppc_hash64_hpt_base(cpu);
890 
891     /* The HW performs a non-atomic byte update */
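    /* (pte1 & 0xff) is the byte holding the C bit; OR-ing 0x80 sets C */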
892     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
893 }
894 
895 static target_ulong rmls_limit(PowerPCCPU *cpu)
896 {
897     CPUPPCState *env = &cpu->env;
898     /*
899      * In theory the meanings of RMLS values are implementation
900      * dependent.  In practice, this seems to have been the set from
901      * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
902      *
903      * Unsupported values mean the OS has shot itself in the
904      * foot. Return a 0-sized RMA in this case, which we expect
905      * to trigger an immediate DSI or ISI
906      */
907     static const target_ulong rma_sizes[16] = {
908         [0] = 256 * GiB,
909         [1] = 16 * GiB,
910         [2] = 1 * GiB,
911         [3] = 64 * MiB,
912         [4] = 256 * MiB,
913         [7] = 128 * MiB,
914         [8] = 32 * MiB,
915     };
916     target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
917 
918     return rma_sizes[rmls];
919 }
920 
921 /* Return the LLP in SLB_VSID format */
922 static uint64_t get_vrma_llp(PowerPCCPU *cpu)
923 {
924     CPUPPCState *env = &cpu->env;
925     uint64_t llp;
926 
927     if (env->mmu_model == POWERPC_MMU_3_00) {
928         ppc_v3_pate_t pate;
929         uint64_t ps, l, lp;
930 
931         /*
932          * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
933          * page size (L||LP equivalent) in the PS field in the HPT partition
934          * table entry.
935          */
936         if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
937             error_report("Bad VRMA with no partition table entry");
938             return 0;
939         }
940         ps = PATE0_GET_PS(pate.dw0);
941         /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
942         l = (ps >> 2) & 0x1;
943         lp = ps & 0x3;
944         llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
945 
946     } else {
947         uint64_t lpcr = env->spr[SPR_LPCR];
948         target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
949 
950         /* VRMASD LLP matches SLB format, just shift and mask it */
951         llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
952     }
953 
954     return llp;
955 }
956 
957 static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
958 {
959     uint64_t llp = get_vrma_llp(cpu);
960     target_ulong vsid = SLB_VSID_VRMA | llp;
961     int i;
962 
963     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
964         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
965 
966         if (!sps->page_shift) {
967             break;
968         }
969 
970         if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
971             slb->esid = SLB_ESID_V;
972             slb->vsid = vsid;
973             slb->sps = sps;
974             return 0;
975         }
976     }
977 
978     error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);
979 
980     return -1;
981 }
982 
983 bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
984                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
985                       bool guest_visible)
986 {
987     CPUState *cs = CPU(cpu);
988     CPUPPCState *env = &cpu->env;
989     ppc_slb_t vrma_slbe;
990     ppc_slb_t *slb;
991     unsigned apshift;
992     hwaddr ptex;
993     ppc_hash_pte64_t pte;
994     int exec_prot, pp_prot, amr_prot, prot;
995     int need_prot;
996     hwaddr raddr;
997     bool vrma = false;
998 
999     /*
1000      * Note on LPCR usage: 970 uses HID4, but our special variant of
1001      * store_spr copies relevant fields into env->spr[SPR_LPCR].
1002      * Similarly we filter unimplemented bits when storing into LPCR
1003      * depending on the MMU version. This code can thus just use the
1004      * LPCR "as-is".
1005      */
1006 
1007     /* 1. Handle real mode accesses */
1008     if (mmuidx_real(mmu_idx)) {
1009         /*
1010          * Translation is supposedly "off", but in real mode the top 4
1011          * effective address bits are (mostly) ignored
1012          */
1013         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
1014 
1015         if (cpu->vhyp) {
1016             /*
1017              * In virtual hypervisor mode, there's nothing to do:
1018              *   EA == GPA == qemu guest address
1019              */
1020         } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
1021             /* In HV mode, add HRMOR if top EA bit is clear */
1022             if (!(eaddr >> 63)) {
1023                 raddr |= env->spr[SPR_HRMOR];
1024             }
1025         } else if (ppc_hash64_use_vrma(env)) {
1026             /* Emulated VRMA mode */
1027             vrma = true;
1028             slb = &vrma_slbe;
1029             if (build_vrma_slbe(cpu, slb) != 0) {
1030                 /* Invalid VRMA setup, machine check */
1031                 if (guest_visible) {
1032                     cs->exception_index = POWERPC_EXCP_MCHECK;
1033                     env->error_code = 0;
1034                 }
1035                 return false;
1036             }
1037 
1038             goto skip_slb_search;
1039         } else {
1040             target_ulong limit = rmls_limit(cpu);
1041 
1042             /* Emulated old-style RMO mode, bounds check against RMLS */
1043             if (raddr >= limit) {
1044                 if (!guest_visible) {
1045                     return false;
1046                 }
1047                 switch (access_type) {
1048                 case MMU_INST_FETCH:
1049                     ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
1050                     break;
1051                 case MMU_DATA_LOAD:
1052                     ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
1053                     break;
1054                 case MMU_DATA_STORE:
1055                     ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
1056                                        DSISR_PROTFAULT | DSISR_ISSTORE);
1057                     break;
1058                 default:
1059                     g_assert_not_reached();
1060                 }
1061                 return false;
1062             }
1063 
1064             raddr |= env->spr[SPR_RMOR];
1065         }
1066 
1067         *raddrp = raddr;
1068         *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1069         *psizep = TARGET_PAGE_BITS;
1070         return true;
1071     }
1072 
1073     /* 2. Translation is on, so look up the SLB */
1074     slb = slb_lookup(cpu, eaddr);
1075     if (!slb) {
1076         /* No entry found, check if in-memory segment tables are in use */
1077         if (ppc64_use_proc_tbl(cpu)) {
1078             /* TODO - Unsupported */
1079             error_report("Segment Table Support Unimplemented");
1080             exit(1);
1081         }
1082         /* Segment still not found, generate the appropriate interrupt */
1083         if (!guest_visible) {
1084             return false;
1085         }
1086         switch (access_type) {
1087         case MMU_INST_FETCH:
1088             cs->exception_index = POWERPC_EXCP_ISEG;
1089             env->error_code = 0;
1090             break;
1091         case MMU_DATA_LOAD:
1092         case MMU_DATA_STORE:
1093             cs->exception_index = POWERPC_EXCP_DSEG;
1094             env->error_code = 0;
1095             env->spr[SPR_DAR] = eaddr;
1096             break;
1097         default:
1098             g_assert_not_reached();
1099         }
1100         return false;
1101     }
1102 
1103  skip_slb_search:
1104 
1105     /* 3. Check for segment level no-execute violation */
1106     if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
1107         if (guest_visible) {
1108             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
1109         }
1110         return false;
1111     }
1112 
1113     /* 4. Locate the PTE in the hash table */
1114     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
1115     if (ptex == -1) {
1116         if (!guest_visible) {
1117             return false;
1118         }
1119         switch (access_type) {
1120         case MMU_INST_FETCH:
1121             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
1122             break;
1123         case MMU_DATA_LOAD:
1124             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
1125             break;
1126         case MMU_DATA_STORE:
1127             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
1128                                DSISR_NOPTE | DSISR_ISSTORE);
1129             break;
1130         default:
1131             g_assert_not_reached();
1132         }
1133         return false;
1134     }
1135     qemu_log_mask(CPU_LOG_MMU,
1136                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
1137 
1138     /* 5. Check access permissions */
1139 
1140     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
1141     pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
1142     if (vrma) {
1143         /* VRMA does not check keys */
1144         amr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1145     } else {
1146         amr_prot = ppc_hash64_amr_prot(cpu, pte);
1147     }
1148     prot = exec_prot & pp_prot & amr_prot;
1149 
1150     need_prot = check_prot_access_type(PAGE_RWX, access_type);
1151     if (need_prot & ~prot) {
1152         /* Access right violation */
1153         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
1154         if (!guest_visible) {
1155             return false;
1156         }
1157         if (access_type == MMU_INST_FETCH) {
1158             int srr1 = 0;
1159             if (PAGE_EXEC & ~exec_prot) {
1160                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
1161             } else if (PAGE_EXEC & ~pp_prot) {
1162                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
1163             }
1164             if (PAGE_EXEC & ~amr_prot) {
1165                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
1166             }
1167             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
1168         } else {
1169             int dsisr = 0;
1170             if (need_prot & ~pp_prot) {
1171                 dsisr |= DSISR_PROTFAULT;
1172             }
1173             if (access_type == MMU_DATA_STORE) {
1174                 dsisr |= DSISR_ISSTORE;
1175             }
1176             if (need_prot & ~amr_prot) {
1177                 dsisr |= DSISR_AMR;
1178             }
1179             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
1180         }
1181         return false;
1182     }
1183 
1184     qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
1185 
1186     /* 6. Update PTE referenced and changed bits if necessary */
1187 
1188     if (!(pte.pte1 & HPTE64_R_R)) {
1189         ppc_hash64_set_r(cpu, ptex, pte.pte1);
1190     }
1191     if (!(pte.pte1 & HPTE64_R_C)) {
1192         if (access_type == MMU_DATA_STORE) {
1193             ppc_hash64_set_c(cpu, ptex, pte.pte1);
1194         } else {
1195             /*
1196              * Treat the page as read-only for now, so that a later write
1197              * will pass through this function again to set the C bit
1198              */
1199             prot &= ~PAGE_WRITE;
1200         }
1201     }
1202 
1203     /* 7. Determine the real address from the PTE */
1204 
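    /*
     * Replace the low apshift bits of the RPN with the in-page offset from
     * the effective address to form the full real address.
     */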
1205     *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
1206     *protp = prot;
1207     *psizep = apshift;
1208     return true;
1209 }
1210 
1211 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
1212                                target_ulong pte0, target_ulong pte1)
1213 {
1214     /*
1215      * XXX: given the fact that there are too many segments to
1216      * invalidate, and we still don't have a tlb_flush_mask(env, n,
1217      * mask) in QEMU, we just invalidate all TLBs
1218      */
1219     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
1220 }
1221 
1222 #ifdef CONFIG_TCG
1223 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1224 {
1225     PowerPCCPU *cpu = env_archcpu(env);
1226 
1227     ppc_store_lpcr(cpu, val);
1228 }
1229 #endif
1230 
1231 void ppc_hash64_init(PowerPCCPU *cpu)
1232 {
1233     CPUPPCState *env = &cpu->env;
1234     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1235 
1236     if (!pcc->hash64_opts) {
1237         assert(!mmu_is_64bit(env->mmu_model));
1238         return;
1239     }
1240 
1241     cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
1242 }
1243 
1244 void ppc_hash64_finalize(PowerPCCPU *cpu)
1245 {
1246     g_free(cpu->hash64_opts);
1247 }
1248 
1249 const PPCHash64Options ppc_hash64_opts_basic = {
1250     .flags = 0,
1251     .slb_size = 64,
1252     .sps = {
1253         { .page_shift = 12, /* 4K */
1254           .slb_enc = 0,
1255           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1256         },
1257         { .page_shift = 24, /* 16M */
1258           .slb_enc = 0x100,
1259           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1260         },
1261     },
1262 };
1263 
1264 const PPCHash64Options ppc_hash64_opts_POWER7 = {
1265     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
1266     .slb_size = 32,
1267     .sps = {
1268         {
1269             .page_shift = 12, /* 4K */
1270             .slb_enc = 0,
1271             .enc = { { .page_shift = 12, .pte_enc = 0 },
1272                      { .page_shift = 16, .pte_enc = 0x7 },
1273                      { .page_shift = 24, .pte_enc = 0x38 }, },
1274         },
1275         {
1276             .page_shift = 16, /* 64K */
1277             .slb_enc = SLB_VSID_64K,
1278             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1279                      { .page_shift = 24, .pte_enc = 0x8 }, },
1280         },
1281         {
1282             .page_shift = 24, /* 16M */
1283             .slb_enc = SLB_VSID_16M,
1284             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1285         },
1286         {
1287             .page_shift = 34, /* 16G */
1288             .slb_enc = SLB_VSID_16G,
1289             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1290         },
1291     }
1292 };