Lines Matching +full:sparc +full:- +full:softmmu
21 #include "qemu/main-loop.h"
22 #include "qemu/target-info.h"
23 #include "accel/tcg/cpu-ops.h"
26 #include "exec/page-protection.h"
28 #include "accel/tcg/cpu-ldst-common.h"
29 #include "accel/tcg/cpu-mmu-index.h"
31 #include "exec/tb-flush.h"
33 #include "exec/mmu-access-type.h"
34 #include "exec/tlb-common.h"
37 #include "qemu/error-report.h"
39 #include "exec/helper-proto-common.h"
40 #include "exec/tlb-flags.h"
43 #include "tb-internal.h"
45 #include "tb-hash.h"
46 #include "tb-internal.h"
47 #include "tlb-bounds.h"
48 #include "internal-common.h"
50 #include "qemu/plugin-memory.h"
52 #include "tcg/tcg-ldst.h"
53 #include "backend-ldst.h"
83 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
95 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
99 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; in tlb_n_entries()
104 return fast->mask + (1 << CPU_TLB_ENTRY_BITS); in sizeof_tlb()
118 const uint64_t *ptr = &entry->addr_idx[access_type]; in tlb_read_idx()
132 uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; in tlb_index()
141 return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)]; in tlb_entry()
147 desc->window_begin_ns = ns; in tlb_window_reset()
148 desc->window_max_entries = max_entries; in tlb_window_reset()
153 CPUJumpCache *jc = cpu->tb_jmp_cache; in tb_jmp_cache_clear_page()
162 qatomic_set(&jc->array[i0 + i].tb, NULL); in tb_jmp_cache_clear_page()
167 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
180 * In general, a memory-hungry process can benefit greatly from an appropriately
187 * To achieve near-optimal performance for all kinds of workloads, we:
191 * memory-hungry process will execute again, and its memory hungriness will
200 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
201 * since in that range performance is likely near-optimal. Recall that the TLB
214 bool window_expired = now > desc->window_begin_ns + window_len_ns; in tlb_mmu_resize_locked()
216 if (desc->n_used_entries > desc->window_max_entries) { in tlb_mmu_resize_locked()
217 desc->window_max_entries = desc->n_used_entries; in tlb_mmu_resize_locked()
219 rate = desc->window_max_entries * 100 / old_size; in tlb_mmu_resize_locked()
224 size_t ceil = pow2ceil(desc->window_max_entries); in tlb_mmu_resize_locked()
225 size_t expected_rate = desc->window_max_entries * 100 / ceil; in tlb_mmu_resize_locked()
234 * expect to get is 35%, which is still in the 30-70% range where in tlb_mmu_resize_locked()
245 tlb_window_reset(desc, now, desc->n_used_entries); in tlb_mmu_resize_locked()
250 g_free(fast->table); in tlb_mmu_resize_locked()
251 g_free(desc->fulltlb); in tlb_mmu_resize_locked()
254 /* desc->n_used_entries is cleared by the caller */ in tlb_mmu_resize_locked()
255 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_resize_locked()
256 fast->table = g_try_new(CPUTLBEntry, new_size); in tlb_mmu_resize_locked()
257 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); in tlb_mmu_resize_locked()
266 while (fast->table == NULL || desc->fulltlb == NULL) { in tlb_mmu_resize_locked()
272 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_resize_locked()
274 g_free(fast->table); in tlb_mmu_resize_locked()
275 g_free(desc->fulltlb); in tlb_mmu_resize_locked()
276 fast->table = g_try_new(CPUTLBEntry, new_size); in tlb_mmu_resize_locked()
277 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); in tlb_mmu_resize_locked()
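The doc comment fragments above (grow when the window's peak use rate is high, shrink toward pow2ceil of the peak when it stays low, keep the windowed use rate in the 30-70% band) can be condensed into the following standalone sketch. It is only an illustration of the heuristic, not the upstream function verbatim; pick_new_tlb_size() and pow2_ceil() are hypothetical helpers, and clamping to the dynamic-TLB size bounds is omitted.

#include <stdbool.h>
#include <stddef.h>

static size_t pow2_ceil(size_t n)          /* smallest power of two >= n */
{
    size_t p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}

static size_t pick_new_tlb_size(size_t old_size, size_t window_max_entries,
                                bool window_expired)
{
    size_t rate = window_max_entries * 100 / old_size;

    if (rate > 70) {
        return old_size * 2;               /* too full: double the TLB */
    }
    if (rate < 30 && window_expired) {
        size_t ceil = pow2_ceil(window_max_entries);
        size_t expected_rate = window_max_entries * 100 / ceil;

        /*
         * Avoid landing just under a power of two (e.g. 1023/1024 = 99.9%),
         * which would trigger another grow almost immediately; if the
         * expected rate is above 70%, doubling ceil brings it to at least
         * 35%, still inside the 30-70% band (cf. the fragment above).
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        return ceil;                       /* mostly idle: shrink */
    }
    return old_size;                       /* already in the 30-70% band */
}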
283 desc->n_used_entries = 0; in tlb_mmu_flush_locked()
284 desc->large_page_addr = -1; in tlb_mmu_flush_locked()
285 desc->large_page_mask = -1; in tlb_mmu_flush_locked()
286 desc->vindex = 0; in tlb_mmu_flush_locked()
287 memset(fast->table, -1, sizeof_tlb(fast)); in tlb_mmu_flush_locked()
288 memset(desc->vtable, -1, sizeof(desc->vtable)); in tlb_mmu_flush_locked()
294 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_one_mmuidx_locked()
295 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; in tlb_flush_one_mmuidx_locked()
306 desc->n_used_entries = 0; in tlb_mmu_init()
307 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; in tlb_mmu_init()
308 fast->table = g_new(CPUTLBEntry, n_entries); in tlb_mmu_init()
309 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); in tlb_mmu_init()
315 cpu->neg.tlb.d[mmu_idx].n_used_entries++; in tlb_n_used_entries_inc()
320 cpu->neg.tlb.d[mmu_idx].n_used_entries--; in tlb_n_used_entries_dec()
328 qemu_spin_init(&cpu->neg.tlb.c.lock); in tlb_init()
331 cpu->neg.tlb.c.dirty = 0; in tlb_init()
334 tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now); in tlb_init()
342 qemu_spin_destroy(&cpu->neg.tlb.c.lock); in tlb_destroy()
344 CPUTLBDesc *desc = &cpu->neg.tlb.d[i]; in tlb_destroy()
345 CPUTLBDescFast *fast = &cpu->neg.tlb.f[i]; in tlb_destroy()
347 g_free(fast->table); in tlb_destroy()
348 g_free(desc->fulltlb); in tlb_destroy()
381 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
383 all_dirty = cpu->neg.tlb.c.dirty; in tlb_flush_by_mmuidx_async_work()
386 cpu->neg.tlb.c.dirty = all_dirty; in tlb_flush_by_mmuidx_async_work()
388 for (work = to_clean; work != 0; work &= work - 1) { in tlb_flush_by_mmuidx_async_work()
393 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_by_mmuidx_async_work()
398 qatomic_set(&cpu->neg.tlb.c.full_flush_count, in tlb_flush_by_mmuidx_async_work()
399 cpu->neg.tlb.c.full_flush_count + 1); in tlb_flush_by_mmuidx_async_work()
401 qatomic_set(&cpu->neg.tlb.c.part_flush_count, in tlb_flush_by_mmuidx_async_work()
402 cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean)); in tlb_flush_by_mmuidx_async_work()
404 qatomic_set(&cpu->neg.tlb.c.elide_flush_count, in tlb_flush_by_mmuidx_async_work()
405 cpu->neg.tlb.c.elide_flush_count + in tlb_flush_by_mmuidx_async_work()
446 return (page == (tlb_entry->addr_read & mask) || in tlb_hit_page_mask_anyprot()
448 page == (tlb_entry->addr_code & mask)); in tlb_hit_page_mask_anyprot()
453 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); in tlb_hit_page_anyprot()
457 * tlb_entry_is_empty - return true if the entry is not in use
462 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; in tlb_entry_is_empty()
471 memset(tlb_entry, -1, sizeof(*tlb_entry)); in tlb_flush_entry_mask_locked()
479 return tlb_flush_entry_mask_locked(tlb_entry, page, -1); in tlb_flush_entry_locked()
487 CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx]; in tlb_flush_vtlb_page_mask_locked()
492 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { in tlb_flush_vtlb_page_mask_locked()
501 tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1); in tlb_flush_vtlb_page_locked()
506 vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr; in tlb_flush_page_locked()
507 vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask; in tlb_flush_page_locked()
542 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
548 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_page_by_mmuidx_async_0()
554 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); in tlb_flush_page_by_mmuidx_async_0()
598 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); in tlb_flush_page_by_mmuidx_async_2()
645 d->addr = addr; in tlb_flush_page_by_mmuidx_all_cpus_synced()
646 d->idxmap = idxmap; in tlb_flush_page_by_mmuidx_all_cpus_synced()
653 d->addr = addr; in tlb_flush_page_by_mmuidx_all_cpus_synced()
654 d->idxmap = idxmap; in tlb_flush_page_by_mmuidx_all_cpus_synced()
669 CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; in tlb_flush_range_locked()
670 CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; in tlb_flush_range_locked()
683 if (mask < f->mask || len > f->mask) { in tlb_flush_range_locked()
696 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { in tlb_flush_range_locked()
699 midx, d->large_page_addr, d->large_page_mask); in tlb_flush_range_locked()
732 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
738 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_flush_range_by_mmuidx_async_0()
753 d.addr -= TARGET_PAGE_SIZE; in tlb_flush_range_by_mmuidx_async_0()
885 * te->addr_write with qatomic_set. We don't need to worry about this for
893 const uintptr_t addr = ent->addr_write; in tlb_reset_dirty_range_locked()
894 int flags = addr | full->slow_flags[MMU_DATA_STORE]; in tlb_reset_dirty_range_locked()
898 uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend; in tlb_reset_dirty_range_locked()
899 if ((host - start) < length) { in tlb_reset_dirty_range_locked()
900 qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY); in tlb_reset_dirty_range_locked()
917 * thing actually updated is the target TLB entry ->addr_write flags.
923 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
925 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; in tlb_reset_dirty()
926 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; in tlb_reset_dirty()
931 tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i], in tlb_reset_dirty()
936 tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i], in tlb_reset_dirty()
940 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_reset_dirty()
947 if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) { in tlb_set_dirty1_locked()
948 tlb_entry->addr_write = addr; in tlb_set_dirty1_locked()
961 qemu_spin_lock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
969 tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr); in tlb_set_dirty()
972 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in tlb_set_dirty()
980 vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr; in tlb_add_large_page()
981 vaddr lp_mask = ~(size - 1); in tlb_add_large_page()
983 if (lp_addr == (vaddr)-1) { in tlb_add_large_page()
990 lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask; in tlb_add_large_page()
995 cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask; in tlb_add_large_page()
996 cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask; in tlb_add_large_page()
1010 address = -1; in tlb_set_compare()
1013 ent->addr_idx[access_type] = address; in tlb_set_compare()
1014 full->slow_flags[access_type] = flags; in tlb_set_compare()
1022 * Called from TCG-generated code, which is under an RCU read-side
1028 CPUTLB *tlb = &cpu->neg.tlb; in tlb_set_page_full()
1029 CPUTLBDesc *desc = &tlb->d[mmu_idx]; in tlb_set_page_full()
1041 if (full->lg_page_size <= TARGET_PAGE_BITS) { in tlb_set_page_full()
1044 sz = (hwaddr)1 << full->lg_page_size; in tlb_set_page_full()
1048 paddr_page = full->phys_addr & TARGET_PAGE_MASK; in tlb_set_page_full()
1050 prot = full->prot; in tlb_set_page_full()
1051 asidx = cpu_asidx_from_attrs(cpu, full->attrs); in tlb_set_page_full()
1053 &xlat, &sz, full->attrs, &prot); in tlb_set_page_full()
1058 addr, full->phys_addr, prot, mmu_idx); in tlb_set_page_full()
1060 read_flags = full->tlb_fill_flags; in tlb_set_page_full()
1061 if (full->lg_page_size < TARGET_PAGE_BITS) { in tlb_set_page_full()
1066 is_ram = memory_region_is_ram(section->mr); in tlb_set_page_full()
1067 is_romd = memory_region_is_romd(section->mr); in tlb_set_page_full()
1071 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; in tlb_set_page_full()
1079 iotlb = memory_region_get_ram_addr(section->mr) + xlat; in tlb_set_page_full()
1086 if (section->readonly) { in tlb_set_page_full()
1119 qemu_spin_lock(&tlb->c.lock); in tlb_set_page_full()
1122 tlb->c.dirty |= 1 << mmu_idx; in tlb_set_page_full()
1132 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; in tlb_set_page_full()
1133 CPUTLBEntry *tv = &desc->vtable[vidx]; in tlb_set_page_full()
1137 desc->vfulltlb[vidx] = desc->fulltlb[index]; in tlb_set_page_full()
1146 * - a physical section number in the lower TARGET_PAGE_BITS in tlb_set_page_full()
1147 * - the offset within section->mr of the page base (I/O, ROMD) with the in tlb_set_page_full()
1151 * (non-page-aligned) vaddr of the eventual memory access to get in tlb_set_page_full()
1156 desc->fulltlb[index] = *full; in tlb_set_page_full()
1157 full = &desc->fulltlb[index]; in tlb_set_page_full()
1158 full->xlat_section = iotlb - addr_page; in tlb_set_page_full()
1159 full->phys_addr = paddr_page; in tlb_set_page_full()
1162 tn.addend = addend - addr_page; in tlb_set_page_full()
1184 qemu_spin_unlock(&tlb->c.lock); in tlb_set_page_full()
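The comment fragments above describe how the value stored in full->xlat_section packs a physical section number into the low TARGET_PAGE_BITS and subtracts the page-aligned addr_page, so that the access-time code only needs to add the original (non-page-aligned) vaddr back to recover the offset within the MemoryRegion. The standalone sketch below illustrates that round trip; the page size, the encode_xlat()/decode_xlat() names, and the assumption that the in-MemoryRegion page offset is itself page aligned are local to this example.

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_BITS  12
#define EX_PAGE_SIZE  (1u << EX_PAGE_BITS)
#define EX_PAGE_MASK  (~(uint64_t)(EX_PAGE_SIZE - 1))

/* Fill time: mr_page_offset and addr_page are both page aligned. */
static uint64_t encode_xlat(uint64_t mr_page_offset, unsigned section_idx,
                            uint64_t addr_page)
{
    assert((mr_page_offset & ~EX_PAGE_MASK) == 0);
    assert((addr_page & ~EX_PAGE_MASK) == 0);
    assert(section_idx < EX_PAGE_SIZE);
    /* Subtracting a page-aligned value cannot disturb the low bits. */
    return (mr_page_offset | section_idx) - addr_page;
}

/* Access time: @addr is the full, non-page-aligned guest vaddr. */
static void decode_xlat(uint64_t xlat_section, uint64_t addr,
                        unsigned *section_idx, uint64_t *mr_offset)
{
    *section_idx = xlat_section & ~EX_PAGE_MASK;
    *mr_offset = (xlat_section & EX_PAGE_MASK) + addr;
}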
1243 const TCGCPUOps *ops = cpu->cc->tcg_ops; in tlb_fill_align()
1246 if (ops->tlb_fill_align) { in tlb_fill_align()
1247 if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx, in tlb_fill_align()
1254 if (addr & ((1u << memop_alignment_bits(memop)) - 1)) { in tlb_fill_align()
1255 ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra); in tlb_fill_align()
1257 if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) { in tlb_fill_align()
1269 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, in cpu_unaligned_access()
1282 cpu->mem_io_pc = retaddr; in io_prepare()
1283 if (!cpu->neg.can_do_io) { in io_prepare()
1295 if (!cpu->ignore_memory_transaction_failures in io_failed()
1296 && cpu->cc->tcg_ops->do_transaction_failed) { in io_failed()
1297 hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); in io_failed()
1299 cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, in io_failed()
1301 full->attrs, response, retaddr); in io_failed()
1314 CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; in victim_tlb_hit()
1319 CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; in victim_tlb_hit()
1321 qemu_spin_lock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1325 qemu_spin_unlock(&cpu->neg.tlb.c.lock); in victim_tlb_hit()
1327 CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in victim_tlb_hit()
1328 CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; in victim_tlb_hit()
1340 ram_addr_t ram_addr = mem_vaddr + full->xlat_section; in notdirty_write()
1379 /* Non-faulting page table read failed. */ in probe_access_internal()
1400 *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in probe_access_internal()
1401 flags |= full->slow_flags[access_type]; in probe_access_internal()
1403 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ in probe_access_internal()
1411 *phost = (void *)((uintptr_t)addr + entry->addend); in probe_access_internal()
1465 g_assert(-(addr | TARGET_PAGE_MASK) >= size); in probe_access_flags()
1488 g_assert(-(addr | TARGET_PAGE_MASK) >= size); in probe_access()
1505 full->attrs, wp_access, retaddr); in probe_access()
1534 * Return -1 if we can't translate and execute from an entire page
1551 return -1; in get_page_addr_code_hostp()
1554 if (full->lg_page_size < TARGET_PAGE_BITS) { in get_page_addr_code_hostp()
1555 return -1; in get_page_addr_code_hostp()
1571 * in the softmmu lookup code (or helper). We don't handle re-fills or
1592 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in tlb_plugin_lookup()
1593 data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); in tlb_plugin_lookup()
1598 iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, in tlb_plugin_lookup()
1599 full->attrs); in tlb_plugin_lookup()
1600 data->is_io = true; in tlb_plugin_lookup()
1601 data->mr = section->mr; in tlb_plugin_lookup()
1603 data->is_io = false; in tlb_plugin_lookup()
1604 data->mr = NULL; in tlb_plugin_lookup()
1640 * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
1646 vaddr addr = data->addr; in mmu_lookup1()
1659 memop, data->size, false, ra); in mmu_lookup1()
1667 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in mmu_lookup1()
1669 flags |= full->slow_flags[access_type]; in mmu_lookup1()
1685 if (unlikely(addr & ((1 << a_bits) - 1))) { in mmu_lookup1()
1690 data->full = full; in mmu_lookup1()
1691 data->flags = flags; in mmu_lookup1()
1693 data->haddr = (void *)((uintptr_t)addr + entry->addend); in mmu_lookup1()
1711 CPUTLBEntryFull *full = data->full; in mmu_watch_or_dirty()
1712 vaddr addr = data->addr; in mmu_watch_or_dirty()
1713 int flags = data->flags; in mmu_watch_or_dirty()
1714 int size = data->size; in mmu_watch_or_dirty()
1719 cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra); in mmu_watch_or_dirty()
1728 data->flags = flags; in mmu_watch_or_dirty()
1749 l->memop = get_memop(oi); in mmu_lookup()
1750 l->mmu_idx = get_mmuidx(oi); in mmu_lookup()
1752 tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); in mmu_lookup()
1754 l->page[0].addr = addr; in mmu_lookup()
1755 l->page[0].size = memop_size(l->memop); in mmu_lookup()
1756 l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; in mmu_lookup()
1757 l->page[1].size = 0; in mmu_lookup()
1758 crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; in mmu_lookup()
1761 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); in mmu_lookup()
1763 flags = l->page[0].flags; in mmu_lookup()
1765 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); in mmu_lookup()
1768 l->memop ^= MO_BSWAP; in mmu_lookup()
1772 int size0 = l->page[1].addr - addr; in mmu_lookup()
1773 l->page[1].size = l->page[0].size - size0; in mmu_lookup()
1774 l->page[0].size = size0; in mmu_lookup()
1776 l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx, in mmu_lookup()
1777 l->page[1].addr, addr); in mmu_lookup()
1783 mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); in mmu_lookup()
1784 if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) { in mmu_lookup()
1785 uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); in mmu_lookup()
1786 l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; in mmu_lookup()
1789 flags = l->page[0].flags | l->page[1].flags; in mmu_lookup()
1791 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); in mmu_lookup()
1792 mmu_watch_or_dirty(cpu, &l->page[1], type, ra); in mmu_lookup()
1796 * Since target/sparc is the only user of TLB_BSWAP, and all in mmu_lookup()
1797 * Sparc accesses are aligned, any treatment across two pages in mmu_lookup()
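A quick worked example of the cross-page split above, assuming 4 KiB pages and ignoring any pointer_wrap adjustment:

/*
 * An 8-byte access at addr = 0x1ffd:
 *   page[1].addr = (0x1ffd + 8 - 1) & TARGET_PAGE_MASK = 0x2000
 *   size0        = 0x2000 - 0x1ffd                     = 3
 * so page[0] covers the 3 bytes up to the page boundary and page[1]
 * covers the remaining 5 bytes on the following page.
 */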
1825 retaddr -= GETPC_ADJ; in atomic_mmu_lookup()
1845 * Let the guest notice RMW on a write-only page. in atomic_mmu_lookup()
1848 * but addr_read will only be -1 if PAGE_READ was unset. in atomic_mmu_lookup()
1850 if (unlikely(tlbe->addr_read == -1)) { in atomic_mmu_lookup()
1862 if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) { in atomic_mmu_lookup()
1867 if (unlikely(addr & (size - 1))) { in atomic_mmu_lookup()
1878 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; in atomic_mmu_lookup()
1879 tlb_addr |= tlbe->addr_read; in atomic_mmu_lookup()
1881 tlb_addr |= full->slow_flags[MMU_DATA_STORE]; in atomic_mmu_lookup()
1882 tlb_addr |= full->slow_flags[MMU_DATA_LOAD]; in atomic_mmu_lookup()
1884 /* Notice an IO access or a needs-MMU-lookup access */ in atomic_mmu_lookup()
1887 support this apart from stop-the-world. */ in atomic_mmu_lookup()
1891 hostaddr = (void *)((uintptr_t)addr + tlbe->addend); in atomic_mmu_lookup()
1900 if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { in atomic_mmu_lookup()
1903 if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { in atomic_mmu_lookup()
1907 full->attrs, wp_flags, retaddr); in atomic_mmu_lookup()
1925 * complication of ABI-specific return type promotion and always
1927 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1944 * Load @size bytes from @addr, which is memory-mapped i/o.
1945 * The bytes are concatenated in big-endian order with @ret_be.
1964 this_mop, full->attrs); in int_ld_mmio_beN()
1975 size -= this_size; in int_ld_mmio_beN()
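A standalone sketch of the big-endian concatenation these fragments perform. Assumptions for the example only: the total size is at most 8 bytes, each MMIO read covers the largest power-of-two chunk that is aligned at the current address and no larger than the remaining length, and read_chunk() is a hypothetical stand-in for the dispatch call.

#include <stdint.h>

extern uint64_t read_chunk(uint64_t addr, unsigned size);  /* hypothetical */

static uint64_t ld_mmio_beN(uint64_t ret_be, uint64_t addr, unsigned size)
{
    do {
        /* Largest power-of-two chunk, up to 8 bytes, aligned at @addr. */
        unsigned this_size = 1u << __builtin_ctz(size | (unsigned)addr | 8);
        uint64_t val = read_chunk(addr, this_size);

        if (this_size == 8) {
            return val;     /* with size <= 8, this is the whole access */
        }
        /* Earlier (lower-addressed) chunks become the more significant bits. */
        ret_be = (ret_be << (this_size * 8)) | val;
        addr += this_size;
        size -= this_size;
    } while (size);
    return ret_be;
}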
1992 attrs = full->attrs; in do_ld_mmio_beN()
1993 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_ld_mmio_beN()
1994 mr = section->mr; in do_ld_mmio_beN()
2013 attrs = full->attrs; in do_ld16_mmio_beN()
2014 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_ld16_mmio_beN()
2015 mr = section->mr; in do_ld16_mmio_beN()
2018 a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, in do_ld16_mmio_beN()
2020 b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, in do_ld16_mmio_beN()
2021 MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); in do_ld16_mmio_beN()
2030 * Load @p->size bytes from @p->haddr, which is RAM.
2031 * The bytes are concatenated in big-endian order with @ret_be. in do_ld_bytes_beN()

2035 uint8_t *haddr = p->haddr; in do_ld_bytes_beN()
2036 int i, size = p->size; in do_ld_bytes_beN()
2053 void *haddr = p->haddr; in do_ld_parts_beN()
2054 int size = p->size; in do_ld_parts_beN()
2087 size -= n; in do_ld_parts_beN()
2102 int o = p->addr & 3; in do_ld_whole_be4()
2103 uint32_t x = load_atomic4(p->haddr - o); in do_ld_whole_be4()
2107 x >>= (4 - p->size) * 8; in do_ld_whole_be4()
2108 return (ret_be << (p->size * 8)) | x; in do_ld_whole_be4()
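The shift arithmetic visible above is easier to follow with a concrete case; this worked example assumes, as in the elided lines of the function, that the atomically loaded word is first converted to big-endian byte order and then shifted left by o bytes:

/*
 * Access of p->size == 2 at offset o == 1 within an aligned word
 * holding bytes B0 B1 B2 B3 (B0 at the lowest address):
 *   x (big-endian)            = 0xB0B1B2B3
 *   x <<= o * 8               -> 0xB1B2B300   drop the byte before p->haddr
 *   x >>= (4 - p->size) * 8   -> 0x0000B1B2   drop the bytes past the access
 *   return (ret_be << 16) | 0xB1B2            concatenate below ret_be
 */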
2122 int o = p->addr & 7; in do_ld_whole_be8()
2123 uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o); in do_ld_whole_be8()
2127 x >>= (8 - p->size) * 8; in do_ld_whole_be8()
2128 return (ret_be << (p->size * 8)) | x; in do_ld_whole_be8()
2142 int o = p->addr & 15; in do_ld_whole_be16()
2143 Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o); in do_ld_whole_be16()
2144 int size = p->size; in do_ld_whole_be16()
2150 y = int128_urshift(y, (16 - size) * 8); in do_ld_whole_be16()
2166 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_beN()
2167 return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size, in do_ld_beN()
2183 tmp = tmp ? tmp - 1 : 0; in do_ld_beN()
2186 ? p->size == half_size in do_ld_beN()
2187 : p->size >= half_size) { in do_ld_beN()
2188 if (!HAVE_al8_fast && p->size < 4) { in do_ld_beN()
2212 int size = p->size; in do_ld16_beN()
2216 if (unlikely(p->flags & TLB_MMIO)) { in do_ld16_beN()
2217 return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra); in do_ld16_beN()
2227 p->size = size - 8; in do_ld16_beN()
2229 p->haddr += size - 8; in do_ld16_beN()
2230 p->size = 8; in do_ld16_beN()
2246 p->size = size - 8; in do_ld16_beN()
2248 b = ldq_be_p(p->haddr + size - 8); in do_ld16_beN()
2261 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_1()
2262 return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra); in do_ld_1()
2264 return *(uint8_t *)p->haddr; in do_ld_1()
2273 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_2()
2274 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra); in do_ld_2()
2280 ret = load_atom_2(cpu, ra, p->haddr, memop); in do_ld_2()
2293 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_4()
2294 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra); in do_ld_4()
2300 ret = load_atom_4(cpu, ra, p->haddr, memop); in do_ld_4()
2313 if (unlikely(p->flags & TLB_MMIO)) { in do_ld_8()
2314 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra); in do_ld_8()
2320 ret = load_atom_8(cpu, ra, p->haddr, memop); in do_ld_8()
2484 * Store @size bytes at @addr, which is memory-mapped i/o.
2485 * The bytes to store are extracted in little-endian order from @val_le;
2486 * return the bytes of @val_le beyond @p->size that have not been stored.
2504 this_mop, full->attrs); in int_st_mmio_leN()
2516 size -= this_size; in int_st_mmio_leN()
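A byte-at-a-time standalone illustration of the little-endian extraction described above (the helper itself writes larger aligned chunks); store_bytes_le() and its byte-wise loop are simplifications for this sketch only. The returned remainder is what a caller would hand on for the not-yet-stored part, e.g. the second page of a cross-page store.

#include <stddef.h>
#include <stdint.h>

static uint64_t store_bytes_le(uint8_t *dst, uint64_t val_le, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        dst[i] = val_le & 0xff;   /* lowest byte goes to the lowest address */
        val_le >>= 8;
    }
    return val_le;                /* unstored remainder of @val_le */
}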
2533 attrs = full->attrs; in do_st_mmio_leN()
2534 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_st_mmio_leN()
2535 mr = section->mr; in do_st_mmio_leN()
2553 attrs = full->attrs; in do_st16_mmio_leN()
2554 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); in do_st16_mmio_leN()
2555 mr = section->mr; in do_st16_mmio_leN()
2561 size - 8, mmu_idx, ra, mr, mr_offset + 8); in do_st16_mmio_leN()
2574 if (unlikely(p->flags & TLB_MMIO)) { in do_st_leN()
2575 return do_st_mmio_leN(cpu, p->full, val_le, p->addr, in do_st_leN()
2576 p->size, mmu_idx, ra); in do_st_leN()
2577 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_leN()
2578 return val_le >> (p->size * 8); in do_st_leN()
2588 return store_parts_leN(p->haddr, p->size, val_le); in do_st_leN()
2593 tmp = tmp ? tmp - 1 : 0; in do_st_leN()
2596 ? p->size == half_size in do_st_leN()
2597 : p->size >= half_size) { in do_st_leN()
2598 if (!HAVE_al8_fast && p->size <= 4) { in do_st_leN()
2599 return store_whole_le4(p->haddr, p->size, val_le); in do_st_leN()
2601 return store_whole_le8(p->haddr, p->size, val_le); in do_st_leN()
2611 return store_bytes_leN(p->haddr, p->size, val_le); in do_st_leN()
2625 int size = p->size; in do_st16_leN()
2628 if (unlikely(p->flags & TLB_MMIO)) { in do_st16_leN()
2629 return do_st16_mmio_leN(cpu, p->full, val_le, p->addr, in do_st16_leN()
2631 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st16_leN()
2632 return int128_gethi(val_le) >> ((size - 8) * 8); in do_st16_leN()
2642 store_parts_leN(p->haddr, 8, int128_getlo(val_le)); in do_st16_leN()
2643 return store_parts_leN(p->haddr + 8, p->size - 8, in do_st16_leN()
2651 return store_whole_le16(p->haddr, p->size, val_le); in do_st16_leN()
2661 stq_le_p(p->haddr, int128_getlo(val_le)); in do_st16_leN()
2662 return store_bytes_leN(p->haddr + 8, p->size - 8, in do_st16_leN()
2673 if (unlikely(p->flags & TLB_MMIO)) { in do_st_1()
2674 do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra); in do_st_1()
2675 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_1()
2678 *(uint8_t *)p->haddr = val; in do_st_1()
2685 if (unlikely(p->flags & TLB_MMIO)) { in do_st_2()
2689 do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra); in do_st_2()
2690 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_2()
2697 store_atom_2(cpu, ra, p->haddr, memop, val); in do_st_2()
2704 if (unlikely(p->flags & TLB_MMIO)) { in do_st_4()
2708 do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra); in do_st_4()
2709 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_4()
2716 store_atom_4(cpu, ra, p->haddr, memop, val); in do_st_4()
2723 if (unlikely(p->flags & TLB_MMIO)) { in do_st_8()
2727 do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra); in do_st_8()
2728 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { in do_st_8()
2735 store_atom_8(cpu, ra, p->haddr, memop, val); in do_st_8()
2947 /* To be used for strict 32-bit targets. */