/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}
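
/*
 * Illustrative example of the truncation above, assuming
 * TARGET_PHYS_ADDR_SPACE_BITS == 40: sextract64(0x3ffffffff0800000, 0, 40)
 * discards bits 40..61 and replicates bit 39, yielding the sign-extended
 * physical address 0xfffffffff0800000; addresses below 2^39 pass through
 * unchanged.
 */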

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map it at an offset equal to the 32-bit address, which
         * matches what can be observed on physical machines.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}
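
/*
 * Illustrative mapping performed above, assuming a 40-bit physical
 * address space:
 *   0x00000000-0xefffffff : memory, zero-extended
 *   0xf1000000-0xffffffff : I/O, sign-extended into the top of the space
 *   0xf0000000-0xf0ffffff : PDC, at (-1ull << 36) plus the original
 *                           32-bit address as offset
 */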

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
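        /*
         * The free list is empty: recycle entries round-robin from
         * tlb_last, skipping the BTLB slots at the base of the array.
         */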
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}
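
/*
 * Illustrative example of the match above: with access_id 0x1234, a
 * prot_id of 0x2468 (0x1234 << 1, write-disable bit clear) grants
 * PAGE_EXEC | PAGE_READ | PAGE_WRITE, while 0x2469 (WD bit set) grants
 * only PAGE_EXEC | PAGE_READ.
 */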

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

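/*
 * On PA 2.0 each of the four PID control registers holds two
 * protection ids, one in each 32-bit half, so check both halves.
 */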
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, MemOp mop, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress_align;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
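
    /*
     * Illustrative example: a normal code page (ar_type == 2) with
     * ar_pl1 == 3 and ar_pl2 == 0 is readable and executable at every
     * privilege level (0 is most privileged, 3 least privileged).
     */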

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* access_id == 0 means a public page; no check is performed. */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    /*
     * In reverse priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
    }
    if (unlikely(!ent->d)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
    }
    if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * When PSW_X is set, allow this single access to succeed;
             * the write bit will be invalidated for subsequent accesses.
             * Otherwise raise the fault.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress_align:
    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
        ret = EXCP_UNALIGN;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these is described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                             MMUAccessType type, int mmu_idx,
                             MemOp memop, int size, bool probe, uintptr_t ra)
{
    CPUHPPAState *env = cpu_env(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                     &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, ra, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    out->lg_page_size = TARGET_PAGE_BITS;

    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
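    /* Bits [5:24] of the operand hold the 20-bit physical page number. */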
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
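
/*
 * PA 1.1 protection-word layout, as decoded above (bit 0 the lsb):
 *   [1:18] access_id, [19] u, [20:21] ar_pl2, [22:23] ar_pl1,
 *   [24:26] ar_type, [27] b, [28] d, [29] t.
 */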

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;
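    /*
     * Illustrative example (assuming 4 KiB target pages): r1 & 0xf == 2
     * gives mask_shift == 4, so the entry covers a naturally aligned
     * 64 KiB range (TARGET_PAGE_SIZE << 4).
     */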

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
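    /*
     * Illustrative example (assuming 4 KiB pages): an operand of
     * 0x12345003 encodes exponent 3, so the flush below covers
     * TARGET_PAGE_SIZE << 6 == 256 KiB starting at 0x12345000.
     */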
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}
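
/*
 * Note on the helper above: when other vCPUs exist, the source CPU's
 * own flush is queued with async_safe_run_on_cpu(), which runs while
 * all vCPUs are quiescent, so every queued flush has completed before
 * the guest resumes -- the synchronous semantics noted in the comment.
 */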

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

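    /*
     * PDC calling convention, as used below: gr[25] selects the
     * sub-option, gr[24]..gr[19] carry its arguments, and gr[28]
     * receives the status code.
     */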
    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
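    /*
     * Illustrative example: a branch from privilege level 3
     * (iaoq_f & 3 == 3) through a gateway page with ar_type == 4
     * is promoted to level 0, yielding iaoq_f & -4.
     */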
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}

void HELPER(update_gva_offset_mask)(CPUHPPAState *env)
{
    update_gva_offset_mask(env);
}
835