/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
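    /*
     * Sign extraction (rather than zero extraction) appears intentional
     * here: it keeps addresses near the top of the absolute space, such
     * as the I/O range, at the top of the truncated N-bit physical space.
     */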
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
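    /*
     * Worked examples of the three cases above,
     * with N = TARGET_PHYS_ADDR_SPACE_BITS:
     *   0x12345678 (memory) -> 0x0000000012345678   (zero-extended)
     *   0xf1000000 (I/O)    -> 0xfffffffff1000000   (sign-extended)
     *   0xf0001234 (PDC)    -> (-1ull << (N - 4)) | 0xf0001234
     */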
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

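    /*
     * If the free list is empty, evict an existing entry in round-robin
     * order from the dynamic portion of env->tlb[], skipping over the
     * fixed BTLB slots at the front of the array.
     */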
    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
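/*
 * As decoded below, the prot_id value carries the PID proper in
 * bits [16:1] and a write-disable flag in bit 0: a match with the
 * flag set permits read and execute but not write.
 */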
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

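/*
 * For pa2.0, each 64-bit PID register packs two protection ids, so
 * both the low and high halves of each %cr must be checked.
 */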
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, MemOp mop, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress_align;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
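    /*
     * E.g. ar_type 2 with ar_pl1 = 1 and ar_pl2 = 0 describes a normal
     * code page that privilege levels 0 and 1 may read and execute,
     * while the less privileged levels 2 and 3 get no access at all.
     */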
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
    }
    if (unlikely(!ent->d)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
    }
    if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress_align:
    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
        ret = EXCP_UNALIGN;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these is described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
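                /*
                 * E.g. with PSW_W set, a base register value of
                 * 0x4000000000000000 yields b = 1, which is deposited
                 * into the top two bits of the IOR.
                 */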
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                             MMUAccessType type, int mmu_idx,
                             MemOp memop, int size, bool probe, uintptr_t ra)
{
    CPUHPPAState *env = cpu_env(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                     &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, ra, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;

    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
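    /* The pa1.1 ITLBA format carries the physical page number in bits [24:5]. */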
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
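    /*
     * E.g. a low nibble of 2 expands to TARGET_PAGE_SIZE << 4 bytes,
     * i.e. a 16-page flush range.
     */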
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
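    /*
     * If work was queued on other cpus, run the local flush as "safe"
     * work, which executes only once all cpus are out of their
     * execution loops; this provides the synchronous behaviour
     * noted above.
     */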
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }
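    /* The final entry's unused_next is already NULL from the memset above. */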

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
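/*
 * As used below: gr[25] selects the sub-option, gr[24]..gr[19] carry
 * the option-specific arguments, and the PDC status is returned in
 * gr[28] (0 for PDC_OK, negative on error).
 */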
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
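    /*
     * E.g. a gateway page with ar_type 4 entered at privilege level 3
     * promotes execution to level 0 (new_priv = 4 & 3 = 0).
     */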
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}

void HELPER(update_gva_offset_mask)(CPUHPPAState *env)
{
    update_gva_offset_mask(env);
}
833