/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};
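
/*
 * phys_map is the root of a radix tree keyed by physical page number:
 * a non-leaf PhysPageEntry.ptr names a node (an array of L2_SIZE
 * entries) in phys_map_nodes, while a leaf .ptr names an entry in
 * phys_sections.  Since ptr is only 15 bits wide, at most 2^15 - 1
 * nodes or sections can be referenced; the all-ones 15-bit value is
 * reserved as PHYS_MAP_NODE_NIL below.
 */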

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
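
/*
 * A subpage_t stands in for a guest page that several memory regions
 * share at sub-page granularity: sub_section[] holds one phys_sections
 * index per byte offset inside the page.  For example (assuming 4KiB
 * target pages), an access at base + 0x42 is dispatched through
 * sub_section[SUBPAGE_IDX(base + 0x42)], i.e. sub_section[0x42].
 */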

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}
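
/*
 * Worked example of the lookup above: each iteration consumes L2_BITS
 * of the page index, walking one tree level.  Assuming L2_BITS == 10
 * and P_L2_LEVELS == 3 (the actual values are configuration
 * dependent), page index 0x12345 resolves through the child slots
 * (0x12345 >> 20) & 0x3ff, (0x12345 >> 10) & 0x3ff and
 * 0x12345 & 0x3ff.  Hitting PHYS_MAP_NODE_NIL at any level yields the
 * "unassigned" section instead.
 */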

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
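
/*
 * The clamping of *plen above keeps an access inside one section.  For
 * example, if a section's MemoryRegion is 0x1000 bytes long and the
 * caller requests 0x800 bytes starting 0xc00 bytes into it, diff is
 * 0x400, so *plen shrinks from 0x800 to 0x400 and the caller has to
 * translate again for the remainder.
 */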

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
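
/*
 * A typical caller loops until the whole transfer has been translated,
 * along these lines (sketch only):
 *
 *     while (len > 0) {
 *         hwaddr xlat, l = len;
 *         MemoryRegion *mr = address_space_translate(as, addr, &xlat,
 *                                                    &l, is_write);
 *         ... access l bytes of mr starting at offset xlat ...
 *         addr += l;
 *         len -= l;
 *     }
 *
 * The for (;;) loop above follows chained IOMMU translations until a
 * terminal (non-IOMMU) region is reached, and substitutes
 * io_mem_unassigned when a translation lacks the requested read/write
 * permission.
 */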

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
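
/*
 * len_mask illustration: for len == 4, len_mask is ~(target_ulong)3,
 * so (len & (len - 1)) == 0 accepts the power-of-2 length and
 * (addr & ~len_mask) rejects any address that is not 4-byte aligned.
 * A request with len == 3 already fails the power-of-2 check.
 */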

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone.
       The copy's list heads must be re-initialized here: after the memcpy
       above they still alias the originals. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}
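
/*
 * The bound asserted above matters because
 * memory_region_section_get_iotlb() ORs a section number into the
 * page-offset bits of a page-aligned address; keeping section numbers
 * below TARGET_PAGE_SIZE guarantees the two halves of that encoding
 * never overlap.
 */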

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
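
/*
 * Worked example of the splitting above (assuming 4KiB target pages):
 * a section covering [0x1800, 0x4400) is registered as a head subpage
 * for [0x1800, 0x2000), one full page [0x2000, 0x4000) via
 * register_multipage(), and a tail subpage for [0x4000, 0x4400).
 * Only the unaligned head and tail take the slower subpage dispatch
 * path.
 */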

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
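
/*
 * file_ram_alloc() backs guest RAM with a file created under the
 * -mem-path directory, normally a hugetlbfs mount, e.g.
 * "-mem-path /dev/hugepages" (example invocation).  With mem_prealloc
 * set, the mapping is MAP_SHARED | MAP_POPULATE so every huge page is
 * faulted in up front.  On any failure it returns NULL and the caller
 * falls back to anonymous memory.
 */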

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
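
/*
 * find_ram_offset() is a best-fit search.  For illustration: with
 * existing blocks at [0, 0x40000000) and [0x80000000, 0xc0000000), a
 * request for 0x20000000 bytes selects the smallest gap that fits,
 * 0x40000000..0x80000000, and returns offset 0x40000000.
 */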

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
1116c5705a77SAvi Kivity 
11178490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
11188490fc78SLuiz Capitulino {
11198490fc78SLuiz Capitulino     QemuOpts *opts;
11208490fc78SLuiz Capitulino 
11218490fc78SLuiz Capitulino     opts = qemu_opts_find(qemu_find_opts("machine"), 0);
11228490fc78SLuiz Capitulino     if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
11238490fc78SLuiz Capitulino         /* disabled by the user */
11248490fc78SLuiz Capitulino         return 0;
11258490fc78SLuiz Capitulino     }
11268490fc78SLuiz Capitulino 
11278490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
11288490fc78SLuiz Capitulino }
11298490fc78SLuiz Capitulino 
1130c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1131c5705a77SAvi Kivity                                    MemoryRegion *mr)
1132c5705a77SAvi Kivity {
1133abb26d63SPaolo Bonzini     RAMBlock *block, *new_block;
1134c5705a77SAvi Kivity 
1135c5705a77SAvi Kivity     size = TARGET_PAGE_ALIGN(size);
1136c5705a77SAvi Kivity     new_block = g_malloc0(sizeof(*new_block));
113784b89d78SCam Macdonell 
1138b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1139b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
11407c637366SAvi Kivity     new_block->mr = mr;
1141432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
11426977dfe6SYoshiaki Tamura     if (host) {
114384b89d78SCam Macdonell         new_block->host = host;
1144cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
11456977dfe6SYoshiaki Tamura     } else {
1146c902760fSMarcelo Tosatti         if (mem_path) {
1147c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
114804b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
1149618a568dSMarcelo Tosatti             if (!new_block->host) {
11506eebf958SPaolo Bonzini                 new_block->host = qemu_anon_ram_alloc(size);
11518490fc78SLuiz Capitulino                 memory_try_enable_merging(new_block->host, size);
1152618a568dSMarcelo Tosatti             }
1153c902760fSMarcelo Tosatti #else
1154c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
1155c902760fSMarcelo Tosatti             exit(1);
1156c902760fSMarcelo Tosatti #endif
1157c902760fSMarcelo Tosatti         } else {
1158868bb33fSJan Kiszka             if (xen_enabled()) {
1159fce537d4SAvi Kivity                 xen_ram_alloc(new_block->offset, size, mr);
1160fdec9918SChristian Borntraeger             } else if (kvm_enabled()) {
1161fdec9918SChristian Borntraeger                 /* some s390/kvm configurations have special constraints */
11626eebf958SPaolo Bonzini                 new_block->host = kvm_ram_alloc(size);
1163432d268cSJun Nakajima             } else {
11646eebf958SPaolo Bonzini                 new_block->host = qemu_anon_ram_alloc(size);
1165432d268cSJun Nakajima             }
11668490fc78SLuiz Capitulino             memory_try_enable_merging(new_block->host, size);
1167c902760fSMarcelo Tosatti         }
11686977dfe6SYoshiaki Tamura     }
116994a6b54fSpbrook     new_block->length = size;
117094a6b54fSpbrook 
1171abb26d63SPaolo Bonzini     /* Keep the list sorted from biggest to smallest block.  */
1172abb26d63SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1173abb26d63SPaolo Bonzini         if (block->length < new_block->length) {
1174abb26d63SPaolo Bonzini             break;
1175abb26d63SPaolo Bonzini         }
1176abb26d63SPaolo Bonzini     }
1177abb26d63SPaolo Bonzini     if (block) {
1178abb26d63SPaolo Bonzini         QTAILQ_INSERT_BEFORE(block, new_block, next);
1179abb26d63SPaolo Bonzini     } else {
1180abb26d63SPaolo Bonzini         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1181abb26d63SPaolo Bonzini     }
11820d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
118394a6b54fSpbrook 
1184f798b07fSUmesh Deshpande     ram_list.version++;
1185b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1186f798b07fSUmesh Deshpande 
11877267c094SAnthony Liguori     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
118804b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
11895fda043fSIgor Mitsyanko     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
11905fda043fSIgor Mitsyanko            0, size >> TARGET_PAGE_BITS);
11911720aeeeSJuan Quintela     cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
119294a6b54fSpbrook 
1193ddb97f1dSJason Baron     qemu_ram_setup_dump(new_block->host, size);
1194ad0b5321SLuiz Capitulino     qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1195ddb97f1dSJason Baron 
11966f0437e8SJan Kiszka     if (kvm_enabled())
11976f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
11986f0437e8SJan Kiszka 
119994a6b54fSpbrook     return new_block->offset;
120094a6b54fSpbrook }
1201e9a1ab19Sbellard 
1202c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
12036977dfe6SYoshiaki Tamura {
1204c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
12056977dfe6SYoshiaki Tamura }
12066977dfe6SYoshiaki Tamura 
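/* Editor's sketch (not part of the original file): how the two entry points
 * above combine.  The names "vga_mr" and "vga.vram" and the 16 MB size are
 * hypothetical; in practice these calls are usually reached through
 * memory_region_init_ram() and vmstate_register_ram().
 */
static ram_addr_t example_alloc_vram(MemoryRegion *vga_mr, DeviceState *dev)
{
    ram_addr_t offset = qemu_ram_alloc(16 * 1024 * 1024, vga_mr);

    /* Name the block so migration can match it on the destination. */
    qemu_ram_set_idstr(offset, "vga.vram", dev);
    return offset;
}
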
12071f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
12081f2e98b6SAlex Williamson {
12091f2e98b6SAlex Williamson     RAMBlock *block;
12101f2e98b6SAlex Williamson 
1211b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1212b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1213a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
12141f2e98b6SAlex Williamson         if (addr == block->offset) {
1215a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
12160d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1217f798b07fSUmesh Deshpande             ram_list.version++;
12187267c094SAnthony Liguori             g_free(block);
1219b2a8658eSUmesh Deshpande             break;
12201f2e98b6SAlex Williamson         }
12211f2e98b6SAlex Williamson     }
1222b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
12231f2e98b6SAlex Williamson }
12241f2e98b6SAlex Williamson 
1225c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
1226e9a1ab19Sbellard {
122704b16653SAlex Williamson     RAMBlock *block;
122804b16653SAlex Williamson 
1229b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1230b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1231a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
123204b16653SAlex Williamson         if (addr == block->offset) {
1233a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
12340d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1235f798b07fSUmesh Deshpande             ram_list.version++;
1236cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1237cd19cfa2SHuang Ying                 ;
1238cd19cfa2SHuang Ying             } else if (mem_path) {
123904b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
124004b16653SAlex Williamson                 if (block->fd) {
124104b16653SAlex Williamson                     munmap(block->host, block->length);
124204b16653SAlex Williamson                     close(block->fd);
124304b16653SAlex Williamson                 } else {
1244e7a09b92SPaolo Bonzini                     qemu_anon_ram_free(block->host, block->length);
124504b16653SAlex Williamson                 }
1246fd28aa13SJan Kiszka #else
1247fd28aa13SJan Kiszka                 abort();
124804b16653SAlex Williamson #endif
124904b16653SAlex Williamson             } else {
1250868bb33fSJan Kiszka                 if (xen_enabled()) {
1251e41d7c69SJan Kiszka                     xen_invalidate_map_cache_entry(block->host);
1252432d268cSJun Nakajima                 } else {
1253e7a09b92SPaolo Bonzini                     qemu_anon_ram_free(block->host, block->length);
1254432d268cSJun Nakajima                 }
125504b16653SAlex Williamson             }
12567267c094SAnthony Liguori             g_free(block);
1257b2a8658eSUmesh Deshpande             break;
125804b16653SAlex Williamson         }
125904b16653SAlex Williamson     }
1260b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
126104b16653SAlex Williamson 
1262e9a1ab19Sbellard }
1263e9a1ab19Sbellard 
1264cd19cfa2SHuang Ying #ifndef _WIN32
1265cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1266cd19cfa2SHuang Ying {
1267cd19cfa2SHuang Ying     RAMBlock *block;
1268cd19cfa2SHuang Ying     ram_addr_t offset;
1269cd19cfa2SHuang Ying     int flags;
1270cd19cfa2SHuang Ying     void *area, *vaddr;
1271cd19cfa2SHuang Ying 
1272a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1273cd19cfa2SHuang Ying         offset = addr - block->offset;
1274cd19cfa2SHuang Ying         if (offset < block->length) {
1275cd19cfa2SHuang Ying             vaddr = block->host + offset;
1276cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1277cd19cfa2SHuang Ying                 ;
1278cd19cfa2SHuang Ying             } else {
1279cd19cfa2SHuang Ying                 flags = MAP_FIXED;
1280cd19cfa2SHuang Ying                 munmap(vaddr, length);
1281cd19cfa2SHuang Ying                 if (mem_path) {
1282cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
1283cd19cfa2SHuang Ying                     if (block->fd) {
1284cd19cfa2SHuang Ying #ifdef MAP_POPULATE
1285cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1286cd19cfa2SHuang Ying                             MAP_PRIVATE;
1287cd19cfa2SHuang Ying #else
1288cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
1289cd19cfa2SHuang Ying #endif
1290cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1291cd19cfa2SHuang Ying                                     flags, block->fd, offset);
1292cd19cfa2SHuang Ying                     } else {
1293cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1294cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1295cd19cfa2SHuang Ying                                     flags, -1, 0);
1296cd19cfa2SHuang Ying                     }
1297fd28aa13SJan Kiszka #else
1298fd28aa13SJan Kiszka                     abort();
1299cd19cfa2SHuang Ying #endif
1300cd19cfa2SHuang Ying                 } else {
1301cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1302cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
1303cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1304cd19cfa2SHuang Ying                                 flags, -1, 0);
1305cd19cfa2SHuang Ying #else
1306cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1307cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1308cd19cfa2SHuang Ying                                 flags, -1, 0);
1309cd19cfa2SHuang Ying #endif
1310cd19cfa2SHuang Ying                 }
1311cd19cfa2SHuang Ying                 if (area != vaddr) {
1312f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1313f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1314cd19cfa2SHuang Ying                             length, addr);
1315cd19cfa2SHuang Ying                     exit(1);
1316cd19cfa2SHuang Ying                 }
13178490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1318ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1319cd19cfa2SHuang Ying             }
1320cd19cfa2SHuang Ying             return;
1321cd19cfa2SHuang Ying         }
1322cd19cfa2SHuang Ying     }
1323cd19cfa2SHuang Ying }
1324cd19cfa2SHuang Ying #endif /* !_WIN32 */
1325cd19cfa2SHuang Ying 
1326dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
13275579c7f3Spbrook    With the exception of the softmmu code in this file, this should
13285579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
13295579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
13305579c7f3Spbrook 
13315579c7f3Spbrook    It should not be used for general purpose DMA.
13325579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
13335579c7f3Spbrook  */
1334c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
1335dc828ca1Spbrook {
133694a6b54fSpbrook     RAMBlock *block;
133794a6b54fSpbrook 
1338b2a8658eSUmesh Deshpande     /* The list is protected by the iothread lock here.  */
13390d6d3c87SPaolo Bonzini     block = ram_list.mru_block;
13400d6d3c87SPaolo Bonzini     if (block && addr - block->offset < block->length) {
13410d6d3c87SPaolo Bonzini         goto found;
13420d6d3c87SPaolo Bonzini     }
1343a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1344f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
13450d6d3c87SPaolo Bonzini             goto found;
13467d82af38SVincent Palatin         }
13470d6d3c87SPaolo Bonzini     }
13480d6d3c87SPaolo Bonzini 
13490d6d3c87SPaolo Bonzini     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
13500d6d3c87SPaolo Bonzini     abort();
13510d6d3c87SPaolo Bonzini 
13520d6d3c87SPaolo Bonzini found:
13530d6d3c87SPaolo Bonzini     ram_list.mru_block = block;
1354868bb33fSJan Kiszka     if (xen_enabled()) {
1355432d268cSJun Nakajima         /* We need to check whether the requested address is in RAM
1356432d268cSJun Nakajima          * because we don't want to map all of guest memory in QEMU.
1357712c2b41SStefano Stabellini          * For the main RAM block (offset 0), map only up to the page end.
1358432d268cSJun Nakajima          */
1359432d268cSJun Nakajima         if (block->offset == 0) {
1360e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1361432d268cSJun Nakajima         } else if (block->host == NULL) {
1362e41d7c69SJan Kiszka             block->host =
1363e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1364432d268cSJun Nakajima         }
1365432d268cSJun Nakajima     }
1366f471a17eSAlex Williamson     return block->host + (addr - block->offset);
136794a6b54fSpbrook }
1368f471a17eSAlex Williamson 
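/* Editor's sketch (not in the original source) of the contract documented
 * above: a device touching only RAM it owns and staying within the block.
 * "vram_offset" and "vram_size" are hypothetical.
 */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    /* Safe: local device memory, no general-purpose DMA involved. */
    memset(vram, 0, vram_size);
}
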
13690d6d3c87SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
13700d6d3c87SPaolo Bonzini  * qemu_get_ram_ptr but do not touch ram_list.mru_block.
13710d6d3c87SPaolo Bonzini  *
13720d6d3c87SPaolo Bonzini  * ??? Is this still necessary?
1373b2e0a138SMichael S. Tsirkin  */
13748b9c99d9SBlue Swirl static void *qemu_safe_ram_ptr(ram_addr_t addr)
1375b2e0a138SMichael S. Tsirkin {
1376b2e0a138SMichael S. Tsirkin     RAMBlock *block;
1377b2e0a138SMichael S. Tsirkin 
1378b2a8658eSUmesh Deshpande     /* The list is protected by the iothread lock here.  */
1379a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1380b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
1381868bb33fSJan Kiszka             if (xen_enabled()) {
1382432d268cSJun Nakajima                 /* We need to check whether the requested address is in RAM
1383432d268cSJun Nakajima                  * because we don't want to map all of guest memory in QEMU.
1384712c2b41SStefano Stabellini                  * For the main RAM block (offset 0), map only up to the page end.
1385432d268cSJun Nakajima                  */
1386432d268cSJun Nakajima                 if (block->offset == 0) {
1387e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
1388432d268cSJun Nakajima                 } else if (block->host == NULL) {
1389e41d7c69SJan Kiszka                     block->host =
1390e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
1391432d268cSJun Nakajima                 }
1392432d268cSJun Nakajima             }
1393b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
1394b2e0a138SMichael S. Tsirkin         }
1395b2e0a138SMichael S. Tsirkin     }
1396b2e0a138SMichael S. Tsirkin 
1397b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1398b2e0a138SMichael S. Tsirkin     abort();
1399b2e0a138SMichael S. Tsirkin 
1400b2e0a138SMichael S. Tsirkin     return NULL;
1401b2e0a138SMichael S. Tsirkin }
1402b2e0a138SMichael S. Tsirkin 
140338bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
140438bee5dcSStefano Stabellini  * but takes a size argument */
14058b9c99d9SBlue Swirl static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
140638bee5dcSStefano Stabellini {
14078ab934f9SStefano Stabellini     if (*size == 0) {
14088ab934f9SStefano Stabellini         return NULL;
14098ab934f9SStefano Stabellini     }
1410868bb33fSJan Kiszka     if (xen_enabled()) {
1411e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1412868bb33fSJan Kiszka     } else {
141338bee5dcSStefano Stabellini         RAMBlock *block;
141438bee5dcSStefano Stabellini 
1415a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
141638bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
141738bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
141838bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
141938bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
142038bee5dcSStefano Stabellini             }
142138bee5dcSStefano Stabellini         }
142238bee5dcSStefano Stabellini 
142338bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
142438bee5dcSStefano Stabellini         abort();
142538bee5dcSStefano Stabellini     }
142638bee5dcSStefano Stabellini }
142738bee5dcSStefano Stabellini 
1428e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
14295579c7f3Spbrook {
143094a6b54fSpbrook     RAMBlock *block;
143194a6b54fSpbrook     uint8_t *host = ptr;
143294a6b54fSpbrook 
1433868bb33fSJan Kiszka     if (xen_enabled()) {
1434e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
1435712c2b41SStefano Stabellini         return 0;
1436712c2b41SStefano Stabellini     }
1437712c2b41SStefano Stabellini 
1438a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1439432d268cSJun Nakajima         /* This case appears when the block is not mapped. */
1440432d268cSJun Nakajima         if (block->host == NULL) {
1441432d268cSJun Nakajima             continue;
1442432d268cSJun Nakajima         }
1443f471a17eSAlex Williamson         if (host - block->host < block->length) {
1444e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
1445e890261fSMarcelo Tosatti             return 0;
144694a6b54fSpbrook         }
1447f471a17eSAlex Williamson     }
1448432d268cSJun Nakajima 
1449e890261fSMarcelo Tosatti     return -1;
1450e890261fSMarcelo Tosatti }
1451f471a17eSAlex Williamson 
1452e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
1453e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
1454e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1455e890261fSMarcelo Tosatti {
1456e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
1457e890261fSMarcelo Tosatti 
1458e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
145994a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
146094a6b54fSpbrook         abort();
1461e890261fSMarcelo Tosatti     }
1462e890261fSMarcelo Tosatti     return ram_addr;
14635579c7f3Spbrook }
14645579c7f3Spbrook 
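/* Editor's note (illustration, not in the original source): the translation
 * helpers above are inverses for any address inside a mapped RAM block:
 *
 *     void *host = qemu_get_ram_ptr(addr);
 *     assert(qemu_ram_addr_from_host_nofail(host) == addr);
 *
 * The _nofail variant aborts on a stray pointer; callers that might hold a
 * non-RAM host pointer should use qemu_ram_addr_from_host() and check its
 * return value instead.
 */
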
1465a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
14660e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
14671ccde1cbSbellard {
14683a7d929eSbellard     int dirty_flags;
1469f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
14703a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
14710e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
1472f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
14733a7d929eSbellard     }
14740e0df1e2SAvi Kivity     switch (size) {
14750e0df1e2SAvi Kivity     case 1:
14765579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
14770e0df1e2SAvi Kivity         break;
14780e0df1e2SAvi Kivity     case 2:
14795579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
14800e0df1e2SAvi Kivity         break;
14810e0df1e2SAvi Kivity     case 4:
14825579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
14830e0df1e2SAvi Kivity         break;
14840e0df1e2SAvi Kivity     default:
14850e0df1e2SAvi Kivity         abort();
14860e0df1e2SAvi Kivity     }
1487f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1488f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1489f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1490f23db169Sbellard        flushed */
1491f23db169Sbellard     if (dirty_flags == 0xff)
14922e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
14931ccde1cbSbellard }
14941ccde1cbSbellard 
1495b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1496b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1497b018ddf6SPaolo Bonzini {
1498b018ddf6SPaolo Bonzini     return is_write;
1499b018ddf6SPaolo Bonzini }
1500b018ddf6SPaolo Bonzini 
15010e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
15020e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1503b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
15040e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
15051ccde1cbSbellard };
15061ccde1cbSbellard 
15070f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
1508b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
15090f459d16Spbrook {
15109349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
151106d55cc1Saliguori     target_ulong pc, cs_base;
15120f459d16Spbrook     target_ulong vaddr;
1513a1d1bb31Saliguori     CPUWatchpoint *wp;
151406d55cc1Saliguori     int cpu_flags;
15150f459d16Spbrook 
151606d55cc1Saliguori     if (env->watchpoint_hit) {
151706d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
151806d55cc1Saliguori          * the debug interrupt so that it will trigger after the
151906d55cc1Saliguori          * current instruction. */
1520c3affe56SAndreas Färber         cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
152106d55cc1Saliguori         return;
152206d55cc1Saliguori     }
15232e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
152472cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1525b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
1526b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
15276e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
15286e140f28Saliguori             if (!env->watchpoint_hit) {
1529a1d1bb31Saliguori                 env->watchpoint_hit = wp;
15305a316526SBlue Swirl                 tb_check_watchpoint(env);
153106d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
153206d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
1533488d6577SMax Filippov                     cpu_loop_exit(env);
153406d55cc1Saliguori                 } else {
153506d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
153606d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
153706d55cc1Saliguori                     cpu_resume_from_signal(env, NULL);
15380f459d16Spbrook                 }
1539488d6577SMax Filippov             }
15406e140f28Saliguori         } else {
15416e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
15426e140f28Saliguori         }
15430f459d16Spbrook     }
15440f459d16Spbrook }
15450f459d16Spbrook 
15466658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
15476658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
15486658ffb8Spbrook    phys routines.  */
1549a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
15501ec9b909SAvi Kivity                                unsigned size)
15516658ffb8Spbrook {
15521ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
15531ec9b909SAvi Kivity     switch (size) {
15541ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
15551ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
15561ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
15571ec9b909SAvi Kivity     default: abort();
15581ec9b909SAvi Kivity     }
15596658ffb8Spbrook }
15606658ffb8Spbrook 
1561a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
15621ec9b909SAvi Kivity                             uint64_t val, unsigned size)
15636658ffb8Spbrook {
15641ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
15651ec9b909SAvi Kivity     switch (size) {
156667364150SMax Filippov     case 1:
156767364150SMax Filippov         stb_phys(addr, val);
156867364150SMax Filippov         break;
156967364150SMax Filippov     case 2:
157067364150SMax Filippov         stw_phys(addr, val);
157167364150SMax Filippov         break;
157267364150SMax Filippov     case 4:
157367364150SMax Filippov         stl_phys(addr, val);
157467364150SMax Filippov         break;
15751ec9b909SAvi Kivity     default: abort();
15761ec9b909SAvi Kivity     }
15776658ffb8Spbrook }
15786658ffb8Spbrook 
15791ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
15801ec9b909SAvi Kivity     .read = watch_mem_read,
15811ec9b909SAvi Kivity     .write = watch_mem_write,
15821ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
15836658ffb8Spbrook };
15846658ffb8Spbrook 
1585a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
158670c68e44SAvi Kivity                              unsigned len)
1587db7b5426Sblueswir1 {
1588acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1589acc9d80bSJan Kiszka     uint8_t buf[4];
1590791af8c8SPaolo Bonzini 
1591db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1592acc9d80bSJan Kiszka     printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1593acc9d80bSJan Kiszka            subpage, len, addr);
1594db7b5426Sblueswir1 #endif
1595acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1596acc9d80bSJan Kiszka     switch (len) {
1597acc9d80bSJan Kiszka     case 1:
1598acc9d80bSJan Kiszka         return ldub_p(buf);
1599acc9d80bSJan Kiszka     case 2:
1600acc9d80bSJan Kiszka         return lduw_p(buf);
1601acc9d80bSJan Kiszka     case 4:
1602acc9d80bSJan Kiszka         return ldl_p(buf);
1603acc9d80bSJan Kiszka     default:
1604acc9d80bSJan Kiszka         abort();
1605acc9d80bSJan Kiszka     }
1606db7b5426Sblueswir1 }
1607db7b5426Sblueswir1 
1608a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
160970c68e44SAvi Kivity                           uint64_t value, unsigned len)
1610db7b5426Sblueswir1 {
1611acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1612acc9d80bSJan Kiszka     uint8_t buf[4];
1613acc9d80bSJan Kiszka 
1614db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
161570c68e44SAvi Kivity     printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1616acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1617acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1618db7b5426Sblueswir1 #endif
1619acc9d80bSJan Kiszka     switch (len) {
1620acc9d80bSJan Kiszka     case 1:
1621acc9d80bSJan Kiszka         stb_p(buf, value);
1622acc9d80bSJan Kiszka         break;
1623acc9d80bSJan Kiszka     case 2:
1624acc9d80bSJan Kiszka         stw_p(buf, value);
1625acc9d80bSJan Kiszka         break;
1626acc9d80bSJan Kiszka     case 4:
1627acc9d80bSJan Kiszka         stl_p(buf, value);
1628acc9d80bSJan Kiszka         break;
1629acc9d80bSJan Kiszka     default:
1630acc9d80bSJan Kiszka         abort();
1631acc9d80bSJan Kiszka     }
1632acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1633db7b5426Sblueswir1 }
1634db7b5426Sblueswir1 
1635c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1636c353e4ccSPaolo Bonzini                             unsigned size, bool is_write)
1637c353e4ccSPaolo Bonzini {
1638acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1639c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1640acc9d80bSJan Kiszka     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1641acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', size, addr);
1642c353e4ccSPaolo Bonzini #endif
1643c353e4ccSPaolo Bonzini 
1644acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1645acc9d80bSJan Kiszka                                       size, is_write);
1646c353e4ccSPaolo Bonzini }
1647c353e4ccSPaolo Bonzini 
164870c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
164970c68e44SAvi Kivity     .read = subpage_read,
165070c68e44SAvi Kivity     .write = subpage_write,
1651c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
165270c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
1653db7b5426Sblueswir1 };
1654db7b5426Sblueswir1 
1655c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
16565312bd8bSAvi Kivity                              uint16_t section)
1657db7b5426Sblueswir1 {
1658db7b5426Sblueswir1     int idx, eidx;
1659db7b5426Sblueswir1 
1660db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1661db7b5426Sblueswir1         return -1;
1662db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1663db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1664db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
16650bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
1666db7b5426Sblueswir1            mmio, start, end, idx, eidx, section);
1667db7b5426Sblueswir1 #endif
1668db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
16695312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1670db7b5426Sblueswir1     }
1671db7b5426Sblueswir1 
1672db7b5426Sblueswir1     return 0;
1673db7b5426Sblueswir1 }
1674db7b5426Sblueswir1 
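/* Editor's note (hedged illustration, not in the original source): with a
 * hypothetical 4 KiB page whose first KiB belongs to section A and whose
 * remainder belongs to section B, subpage_register() records A for offsets
 * 0x000..0x3ff and B for 0x400..0xfff; subpage_read/write then resolve an
 * access at, say, offset 0x400 back through the owning address space, which
 * dispatches it to section B.
 */
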
1675acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1676db7b5426Sblueswir1 {
1677c227f099SAnthony Liguori     subpage_t *mmio;
1678db7b5426Sblueswir1 
16797267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
16801eec614bSaliguori 
1681acc9d80bSJan Kiszka     mmio->as = as;
1682db7b5426Sblueswir1     mmio->base = base;
168370c68e44SAvi Kivity     memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
168470c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
1685b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1686db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1687db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1688db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE);
1689db7b5426Sblueswir1 #endif
16900f0cb164SAvi Kivity     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1691db7b5426Sblueswir1 
1692db7b5426Sblueswir1     return mmio;
1693db7b5426Sblueswir1 }
1694db7b5426Sblueswir1 
16955312bd8bSAvi Kivity static uint16_t dummy_section(MemoryRegion *mr)
16965312bd8bSAvi Kivity {
16975312bd8bSAvi Kivity     MemoryRegionSection section = {
16985312bd8bSAvi Kivity         .mr = mr,
16995312bd8bSAvi Kivity         .offset_within_address_space = 0,
17005312bd8bSAvi Kivity         .offset_within_region = 0,
1701052e87b0SPaolo Bonzini         .size = int128_2_64(),
17025312bd8bSAvi Kivity     };
17035312bd8bSAvi Kivity 
17045312bd8bSAvi Kivity     return phys_section_add(&section);
17055312bd8bSAvi Kivity }
17065312bd8bSAvi Kivity 
1707a8170e5eSAvi Kivity MemoryRegion *iotlb_to_region(hwaddr index)
1708aa102231SAvi Kivity {
170937ec01d4SAvi Kivity     return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1710aa102231SAvi Kivity }
1711aa102231SAvi Kivity 
1712e9179ce1SAvi Kivity static void io_mem_init(void)
1713e9179ce1SAvi Kivity {
1714bf8d5166SPaolo Bonzini     memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
17150e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
17160e0df1e2SAvi Kivity                           "unassigned", UINT64_MAX);
17170e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
17180e0df1e2SAvi Kivity                           "notdirty", UINT64_MAX);
17191ec9b909SAvi Kivity     memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
17201ec9b909SAvi Kivity                           "watch", UINT64_MAX);
1721e9179ce1SAvi Kivity }
1722e9179ce1SAvi Kivity 
1723ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1724ac1970fbSAvi Kivity {
1725ac1970fbSAvi Kivity     AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1726ac1970fbSAvi Kivity 
1727ac1970fbSAvi Kivity     destroy_all_mappings(d);
1728ac1970fbSAvi Kivity     d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1729ac1970fbSAvi Kivity }
1730ac1970fbSAvi Kivity 
173150c1e149SAvi Kivity static void core_begin(MemoryListener *listener)
173250c1e149SAvi Kivity {
17335312bd8bSAvi Kivity     phys_sections_clear();
17345312bd8bSAvi Kivity     phys_section_unassigned = dummy_section(&io_mem_unassigned);
1735aa102231SAvi Kivity     phys_section_notdirty = dummy_section(&io_mem_notdirty);
1736aa102231SAvi Kivity     phys_section_rom = dummy_section(&io_mem_rom);
1737aa102231SAvi Kivity     phys_section_watch = dummy_section(&io_mem_watch);
173850c1e149SAvi Kivity }
173950c1e149SAvi Kivity 
17401d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
174150c1e149SAvi Kivity {
17429349b4f9SAndreas Färber     CPUArchState *env;
1743117712c3SAvi Kivity 
1744117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
1745117712c3SAvi Kivity        reset the modified entries */
1746117712c3SAvi Kivity     /* XXX: slow ! */
1747117712c3SAvi Kivity     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1748117712c3SAvi Kivity         tlb_flush(env, 1);
1749117712c3SAvi Kivity     }
175050c1e149SAvi Kivity }
175150c1e149SAvi Kivity 
175293632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
175393632747SAvi Kivity {
175493632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(1);
175593632747SAvi Kivity }
175693632747SAvi Kivity 
175793632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
175893632747SAvi Kivity {
175993632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(0);
176093632747SAvi Kivity }
176193632747SAvi Kivity 
17624855d41aSAvi Kivity static void io_region_add(MemoryListener *listener,
17634855d41aSAvi Kivity                           MemoryRegionSection *section)
17644855d41aSAvi Kivity {
1765a2d33521SAvi Kivity     MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1766a2d33521SAvi Kivity 
1767a2d33521SAvi Kivity     mrio->mr = section->mr;
1768a2d33521SAvi Kivity     mrio->offset = section->offset_within_region;
1769a2d33521SAvi Kivity     iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1770052e87b0SPaolo Bonzini                  section->offset_within_address_space,
1771052e87b0SPaolo Bonzini                  int128_get64(section->size));
1772a2d33521SAvi Kivity     ioport_register(&mrio->iorange);
17734855d41aSAvi Kivity }
17744855d41aSAvi Kivity 
17754855d41aSAvi Kivity static void io_region_del(MemoryListener *listener,
17764855d41aSAvi Kivity                           MemoryRegionSection *section)
17774855d41aSAvi Kivity {
1778052e87b0SPaolo Bonzini     isa_unassign_ioport(section->offset_within_address_space,
1779052e87b0SPaolo Bonzini                         int128_get64(section->size));
17804855d41aSAvi Kivity }
17814855d41aSAvi Kivity 
178293632747SAvi Kivity static MemoryListener core_memory_listener = {
178350c1e149SAvi Kivity     .begin = core_begin,
178493632747SAvi Kivity     .log_global_start = core_log_global_start,
178593632747SAvi Kivity     .log_global_stop = core_log_global_stop,
1786ac1970fbSAvi Kivity     .priority = 1,
178793632747SAvi Kivity };
178893632747SAvi Kivity 
17894855d41aSAvi Kivity static MemoryListener io_memory_listener = {
17904855d41aSAvi Kivity     .region_add = io_region_add,
17914855d41aSAvi Kivity     .region_del = io_region_del,
17924855d41aSAvi Kivity     .priority = 0,
17934855d41aSAvi Kivity };
17944855d41aSAvi Kivity 
17951d71148eSAvi Kivity static MemoryListener tcg_memory_listener = {
17961d71148eSAvi Kivity     .commit = tcg_commit,
17971d71148eSAvi Kivity };
17981d71148eSAvi Kivity 
1799ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
1800ac1970fbSAvi Kivity {
1801ac1970fbSAvi Kivity     AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1802ac1970fbSAvi Kivity 
1803ac1970fbSAvi Kivity     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1804ac1970fbSAvi Kivity     d->listener = (MemoryListener) {
1805ac1970fbSAvi Kivity         .begin = mem_begin,
1806ac1970fbSAvi Kivity         .region_add = mem_add,
1807ac1970fbSAvi Kivity         .region_nop = mem_add,
1808ac1970fbSAvi Kivity         .priority = 0,
1809ac1970fbSAvi Kivity     };
1810acc9d80bSJan Kiszka     d->as = as;
1811ac1970fbSAvi Kivity     as->dispatch = d;
1812ac1970fbSAvi Kivity     memory_listener_register(&d->listener, as);
1813ac1970fbSAvi Kivity }
1814ac1970fbSAvi Kivity 
181583f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
181683f3c251SAvi Kivity {
181783f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
181883f3c251SAvi Kivity 
181983f3c251SAvi Kivity     memory_listener_unregister(&d->listener);
182083f3c251SAvi Kivity     destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
182183f3c251SAvi Kivity     g_free(d);
182283f3c251SAvi Kivity     as->dispatch = NULL;
182383f3c251SAvi Kivity }
182483f3c251SAvi Kivity 
182562152b8aSAvi Kivity static void memory_map_init(void)
182662152b8aSAvi Kivity {
18277267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
18288417cebfSAvi Kivity     memory_region_init(system_memory, "system", INT64_MAX);
18297dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
1830309cb471SAvi Kivity 
18317267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
1832309cb471SAvi Kivity     memory_region_init(system_io, "io", 65536);
18337dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
183493632747SAvi Kivity 
1835f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
1836f6790af6SAvi Kivity     memory_listener_register(&io_memory_listener, &address_space_io);
1837f6790af6SAvi Kivity     memory_listener_register(&tcg_memory_listener, &address_space_memory);
183862152b8aSAvi Kivity }
183962152b8aSAvi Kivity 
184062152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
184162152b8aSAvi Kivity {
184262152b8aSAvi Kivity     return system_memory;
184362152b8aSAvi Kivity }
184462152b8aSAvi Kivity 
1845309cb471SAvi Kivity MemoryRegion *get_system_io(void)
1846309cb471SAvi Kivity {
1847309cb471SAvi Kivity     return system_io;
1848309cb471SAvi Kivity }
1849309cb471SAvi Kivity 
1850e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
1851e2eef170Spbrook 
185213eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
185313eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
18549349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1855a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
185613eb76e0Sbellard {
185713eb76e0Sbellard     int l, flags;
185813eb76e0Sbellard     target_ulong page;
185953a5960aSpbrook     void * p;
186013eb76e0Sbellard 
186113eb76e0Sbellard     while (len > 0) {
186213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
186313eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
186413eb76e0Sbellard         if (l > len)
186513eb76e0Sbellard             l = len;
186613eb76e0Sbellard         flags = page_get_flags(page);
186713eb76e0Sbellard         if (!(flags & PAGE_VALID))
1868a68fe89cSPaul Brook             return -1;
186913eb76e0Sbellard         if (is_write) {
187013eb76e0Sbellard             if (!(flags & PAGE_WRITE))
1871a68fe89cSPaul Brook                 return -1;
1872579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
187372fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1874a68fe89cSPaul Brook                 return -1;
187572fb7daaSaurel32             memcpy(p, buf, l);
187672fb7daaSaurel32             unlock_user(p, addr, l);
187713eb76e0Sbellard         } else {
187813eb76e0Sbellard             if (!(flags & PAGE_READ))
1879a68fe89cSPaul Brook                 return -1;
1880579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
188172fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1882a68fe89cSPaul Brook                 return -1;
188372fb7daaSaurel32             memcpy(buf, p, l);
18845b257578Saurel32             unlock_user(p, addr, 0);
188513eb76e0Sbellard         }
188613eb76e0Sbellard         len -= l;
188713eb76e0Sbellard         buf += l;
188813eb76e0Sbellard         addr += l;
188913eb76e0Sbellard     }
1890a68fe89cSPaul Brook     return 0;
189113eb76e0Sbellard }
18928df1cd07Sbellard 
189313eb76e0Sbellard #else
189451d7a9ebSAnthony PERARD 
1895a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
1896a8170e5eSAvi Kivity                                      hwaddr length)
189751d7a9ebSAnthony PERARD {
189851d7a9ebSAnthony PERARD     if (!cpu_physical_memory_is_dirty(addr)) {
189951d7a9ebSAnthony PERARD         /* invalidate code */
190051d7a9ebSAnthony PERARD         tb_invalidate_phys_page_range(addr, addr + length, 0);
190151d7a9ebSAnthony PERARD         /* set dirty bit */
190251d7a9ebSAnthony PERARD         cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
190351d7a9ebSAnthony PERARD     }
1904e226939dSAnthony PERARD     xen_modified_memory(addr, length);
190551d7a9ebSAnthony PERARD }
190651d7a9ebSAnthony PERARD 
19072bbfa05dSPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
19082bbfa05dSPaolo Bonzini {
19092bbfa05dSPaolo Bonzini     if (memory_region_is_ram(mr)) {
19102bbfa05dSPaolo Bonzini         return !(is_write && mr->readonly);
19112bbfa05dSPaolo Bonzini     }
19122bbfa05dSPaolo Bonzini     if (memory_region_is_romd(mr)) {
19132bbfa05dSPaolo Bonzini         return !is_write;
19142bbfa05dSPaolo Bonzini     }
19152bbfa05dSPaolo Bonzini 
19162bbfa05dSPaolo Bonzini     return false;
19172bbfa05dSPaolo Bonzini }
19182bbfa05dSPaolo Bonzini 
1919f52cc467SJan Kiszka static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
192082f2563fSPaolo Bonzini {
1921f52cc467SJan Kiszka     if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
192282f2563fSPaolo Bonzini         return 4;
192382f2563fSPaolo Bonzini     }
1924f52cc467SJan Kiszka     if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
192582f2563fSPaolo Bonzini         return 2;
192682f2563fSPaolo Bonzini     }
192782f2563fSPaolo Bonzini     return 1;
192882f2563fSPaolo Bonzini }
192982f2563fSPaolo Bonzini 
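/* Editor's note (worked example, not in the original source): for an 8-byte
 * MMIO transfer starting at address 0x1002 on a region without
 * impl.unaligned, the loop below issues a 2-byte access at 0x1002, then a
 * 4-byte access at 0x1004, then a 2-byte access at 0x1008.
 */
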
1930fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1931ac1970fbSAvi Kivity                       int len, bool is_write)
193213eb76e0Sbellard {
1933149f54b5SPaolo Bonzini     hwaddr l;
193413eb76e0Sbellard     uint8_t *ptr;
1935791af8c8SPaolo Bonzini     uint64_t val;
1936149f54b5SPaolo Bonzini     hwaddr addr1;
19375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
1938fd8aaa76SPaolo Bonzini     bool error = false;
193913eb76e0Sbellard 
194013eb76e0Sbellard     while (len > 0) {
194113eb76e0Sbellard         l = len;
19425c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
194313eb76e0Sbellard 
194413eb76e0Sbellard         if (is_write) {
19455c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
19465c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
19476a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
19486a00d601Sbellard                    potential bugs */
194982f2563fSPaolo Bonzini                 if (l == 4) {
19501c213d19Sbellard                     /* 32 bit write access */
1951c27004ecSbellard                     val = ldl_p(buf);
19525c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
195382f2563fSPaolo Bonzini                 } else if (l == 2) {
19541c213d19Sbellard                     /* 16 bit write access */
1955c27004ecSbellard                     val = lduw_p(buf);
19565c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
195713eb76e0Sbellard                 } else {
19581c213d19Sbellard                     /* 8 bit write access */
1959c27004ecSbellard                     val = ldub_p(buf);
19605c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
196113eb76e0Sbellard                 }
19622bbfa05dSPaolo Bonzini             } else {
19635c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
196413eb76e0Sbellard                 /* RAM case */
19655579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
196613eb76e0Sbellard                 memcpy(ptr, buf, l);
196751d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
19683a7d929eSbellard             }
196913eb76e0Sbellard         } else {
19705c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
197113eb76e0Sbellard                 /* I/O case */
19725c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
197382f2563fSPaolo Bonzini                 if (l == 4) {
197413eb76e0Sbellard                     /* 32 bit read access */
19755c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
1976c27004ecSbellard                     stl_p(buf, val);
197782f2563fSPaolo Bonzini                 } else if (l == 2) {
197813eb76e0Sbellard                     /* 16 bit read access */
19795c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
1980c27004ecSbellard                     stw_p(buf, val);
198113eb76e0Sbellard                 } else {
19821c213d19Sbellard                     /* 8 bit read access */
19835c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
1984c27004ecSbellard                     stb_p(buf, val);
198513eb76e0Sbellard                 }
198613eb76e0Sbellard             } else {
198713eb76e0Sbellard                 /* RAM case */
19885c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1989f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
199013eb76e0Sbellard             }
199113eb76e0Sbellard         }
199213eb76e0Sbellard         len -= l;
199313eb76e0Sbellard         buf += l;
199413eb76e0Sbellard         addr += l;
199513eb76e0Sbellard     }
1996fd8aaa76SPaolo Bonzini 
1997fd8aaa76SPaolo Bonzini     return error;
199813eb76e0Sbellard }
19998df1cd07Sbellard 
2000fd8aaa76SPaolo Bonzini bool address_space_write(AddressSpace *as, hwaddr addr,
2001ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
2002ac1970fbSAvi Kivity {
2003fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2004ac1970fbSAvi Kivity }
2005ac1970fbSAvi Kivity 
2006fd8aaa76SPaolo Bonzini bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2007ac1970fbSAvi Kivity {
2008fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, buf, len, false);
2009ac1970fbSAvi Kivity }
2010ac1970fbSAvi Kivity 
2011ac1970fbSAvi Kivity 
2012a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2013ac1970fbSAvi Kivity                             int len, int is_write)
2014ac1970fbSAvi Kivity {
2015fd8aaa76SPaolo Bonzini     address_space_rw(&address_space_memory, addr, buf, len, is_write);
2016ac1970fbSAvi Kivity }
2017ac1970fbSAvi Kivity 
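/* Editor's sketch (not part of the original source): the catch-all DMA path.
 * cpu_physical_memory_rw() handles both RAM and MMIO, splitting the transfer
 * at page and region boundaries; "guest_addr" is hypothetical.
 */
static void example_dma_roundtrip(hwaddr guest_addr)
{
    uint8_t buf[512];

    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 0);  /* read */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 1);  /* write back */
}
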
2018d0ecd2aaSbellard /* used for ROM loading: can write in RAM and ROM */
2019a8170e5eSAvi Kivity void cpu_physical_memory_write_rom(hwaddr addr,
2020d0ecd2aaSbellard                                    const uint8_t *buf, int len)
2021d0ecd2aaSbellard {
2022149f54b5SPaolo Bonzini     hwaddr l;
2023d0ecd2aaSbellard     uint8_t *ptr;
2024149f54b5SPaolo Bonzini     hwaddr addr1;
20255c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2026d0ecd2aaSbellard 
2027d0ecd2aaSbellard     while (len > 0) {
2028d0ecd2aaSbellard         l = len;
20295c8a00ceSPaolo Bonzini         mr = address_space_translate(&address_space_memory,
2030149f54b5SPaolo Bonzini                                      addr, &addr1, &l, true);
2031d0ecd2aaSbellard 
20325c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
20335c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2034d0ecd2aaSbellard             /* do nothing */
2035d0ecd2aaSbellard         } else {
20365c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2037d0ecd2aaSbellard             /* ROM/RAM case */
20385579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2039d0ecd2aaSbellard             memcpy(ptr, buf, l);
204051d7a9ebSAnthony PERARD             invalidate_and_set_dirty(addr1, l);
2041d0ecd2aaSbellard         }
2042d0ecd2aaSbellard         len -= l;
2043d0ecd2aaSbellard         buf += l;
2044d0ecd2aaSbellard         addr += l;
2045d0ecd2aaSbellard     }
2046d0ecd2aaSbellard }
2047d0ecd2aaSbellard 
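/* Editor's sketch (not in the original file): a firmware loader writing an
 * image through the ROM-capable path just below 4 GB; the address and the
 * "bios_data"/"bios_size" names are hypothetical.
 */
static void example_load_bios(const uint8_t *bios_data, int bios_size)
{
    cpu_physical_memory_write_rom(0x100000000ULL - bios_size,
                                  bios_data, bios_size);
}
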
20486d16c2f8Saliguori typedef struct {
20496d16c2f8Saliguori     void *buffer;
2050a8170e5eSAvi Kivity     hwaddr addr;
2051a8170e5eSAvi Kivity     hwaddr len;
20526d16c2f8Saliguori } BounceBuffer;
20536d16c2f8Saliguori 
20546d16c2f8Saliguori static BounceBuffer bounce;
20556d16c2f8Saliguori 
2056ba223c29Saliguori typedef struct MapClient {
2057ba223c29Saliguori     void *opaque;
2058ba223c29Saliguori     void (*callback)(void *opaque);
205972cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2060ba223c29Saliguori } MapClient;
2061ba223c29Saliguori 
206272cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
206372cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2064ba223c29Saliguori 
2065ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2066ba223c29Saliguori {
20677267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
2068ba223c29Saliguori 
2069ba223c29Saliguori     client->opaque = opaque;
2070ba223c29Saliguori     client->callback = callback;
207172cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
2072ba223c29Saliguori     return client;
2073ba223c29Saliguori }
2074ba223c29Saliguori 
20758b9c99d9SBlue Swirl static void cpu_unregister_map_client(void *_client)
2076ba223c29Saliguori {
2077ba223c29Saliguori     MapClient *client = (MapClient *)_client;
2078ba223c29Saliguori 
207972cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
20807267c094SAnthony Liguori     g_free(client);
2081ba223c29Saliguori }
2082ba223c29Saliguori 
2083ba223c29Saliguori static void cpu_notify_map_clients(void)
2084ba223c29Saliguori {
2085ba223c29Saliguori     MapClient *client;
2086ba223c29Saliguori 
208772cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
208872cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2089ba223c29Saliguori         client->callback(client->opaque);
209034d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
2091ba223c29Saliguori     }
2092ba223c29Saliguori }
2093ba223c29Saliguori 
209451644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
209551644ab7SPaolo Bonzini {
20965c8a00ceSPaolo Bonzini     MemoryRegion *mr;
209751644ab7SPaolo Bonzini     hwaddr l, xlat;
209851644ab7SPaolo Bonzini 
209951644ab7SPaolo Bonzini     while (len > 0) {
210051644ab7SPaolo Bonzini         l = len;
21015c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
21025c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
21035c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
21045c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
210551644ab7SPaolo Bonzini                 return false;
210651644ab7SPaolo Bonzini             }
210751644ab7SPaolo Bonzini         }
210851644ab7SPaolo Bonzini 
210951644ab7SPaolo Bonzini         len -= l;
211051644ab7SPaolo Bonzini         addr += l;
211151644ab7SPaolo Bonzini     }
211251644ab7SPaolo Bonzini     return true;
211351644ab7SPaolo Bonzini }
211451644ab7SPaolo Bonzini 
21156d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
21166d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
21176d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
21186d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2119ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2120ba223c29Saliguori  * likely to succeed.
21216d16c2f8Saliguori  */
2122ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2123a8170e5eSAvi Kivity                         hwaddr addr,
2124a8170e5eSAvi Kivity                         hwaddr *plen,
2125ac1970fbSAvi Kivity                         bool is_write)
21266d16c2f8Saliguori {
2127a8170e5eSAvi Kivity     hwaddr len = *plen;
2128a8170e5eSAvi Kivity     hwaddr todo = 0;
2129149f54b5SPaolo Bonzini     hwaddr l, xlat;
21305c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2131f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
21328ab934f9SStefano Stabellini     ram_addr_t rlen;
21338ab934f9SStefano Stabellini     void *ret;
21346d16c2f8Saliguori 
21356d16c2f8Saliguori     while (len > 0) {
21366d16c2f8Saliguori         l = len;
21375c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
21386d16c2f8Saliguori 
21395c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
214038bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
21416d16c2f8Saliguori                 break;
21426d16c2f8Saliguori             }
21436d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
21446d16c2f8Saliguori             bounce.addr = addr;
21456d16c2f8Saliguori             bounce.len = l;
21466d16c2f8Saliguori             if (!is_write) {
2147ac1970fbSAvi Kivity                 address_space_read(as, addr, bounce.buffer, l);
21486d16c2f8Saliguori             }
214938bee5dcSStefano Stabellini 
215038bee5dcSStefano Stabellini             *plen = l;
215138bee5dcSStefano Stabellini             return bounce.buffer;
21526d16c2f8Saliguori         }
21538ab934f9SStefano Stabellini         if (!todo) {
21545c8a00ceSPaolo Bonzini             raddr = memory_region_get_ram_addr(mr) + xlat;
2155149f54b5SPaolo Bonzini         } else {
21565c8a00ceSPaolo Bonzini             if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
2157149f54b5SPaolo Bonzini                 break;
2158149f54b5SPaolo Bonzini             }
21598ab934f9SStefano Stabellini         }
21606d16c2f8Saliguori 
21616d16c2f8Saliguori         len -= l;
21626d16c2f8Saliguori         addr += l;
216338bee5dcSStefano Stabellini         todo += l;
21646d16c2f8Saliguori     }
21658ab934f9SStefano Stabellini     rlen = todo;
21668ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
21678ab934f9SStefano Stabellini     *plen = rlen;
21688ab934f9SStefano Stabellini     return ret;
21696d16c2f8Saliguori }
21706d16c2f8Saliguori 
2171ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
21726d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
21736d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
21746d16c2f8Saliguori  */
2175a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2176a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
21776d16c2f8Saliguori {
21786d16c2f8Saliguori     if (buffer != bounce.buffer) {
21796d16c2f8Saliguori         if (is_write) {
2180e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
21816d16c2f8Saliguori             while (access_len) {
21826d16c2f8Saliguori                 unsigned l;
21836d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
21846d16c2f8Saliguori                 if (l > access_len) {
21856d16c2f8Saliguori                     l = access_len;
21856d16c2f8Saliguori                 }
218651d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
21876d16c2f8Saliguori                 addr1 += l;
21886d16c2f8Saliguori                 access_len -= l;
21896d16c2f8Saliguori             }
21906d16c2f8Saliguori         }
2191868bb33fSJan Kiszka         if (xen_enabled()) {
2192e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2193050a0ddfSAnthony PERARD         }
21946d16c2f8Saliguori         return;
21956d16c2f8Saliguori     }
21966d16c2f8Saliguori     if (is_write) {
2197ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
21986d16c2f8Saliguori     }
2199f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
22006d16c2f8Saliguori     bounce.buffer = NULL;
2201ba223c29Saliguori     cpu_notify_map_clients();
22026d16c2f8Saliguori }
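
/*
 * Illustrative sketch (guarded out of the build): the canonical
 * map/use/unmap pattern for the two functions above.  When the target is
 * not RAM, address_space_map() hands out the single global bounce buffer;
 * a NULL return means that buffer is busy, and the hypothetical
 * example_retry_cb() would be invoked once it frees up.
 */
#if 0
static void example_retry_cb(void *opaque);

static void example_dma_write(AddressSpace *as, hwaddr addr, hwaddr size,
                              const uint8_t *data)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        /* Bounce buffer in use: retry from the map-client callback. */
        cpu_register_map_client(NULL, example_retry_cb);
        return;
    }
    /* plen may have shrunk to the largest contiguous piece. */
    memcpy(host, data, plen);
    /* access_len == plen marks exactly the written bytes dirty. */
    address_space_unmap(as, host, plen, true, plen);
}
#endif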
2203d0ecd2aaSbellard 
2204a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2205a8170e5eSAvi Kivity                               hwaddr *plen,
2206ac1970fbSAvi Kivity                               int is_write)
2207ac1970fbSAvi Kivity {
2208ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2209ac1970fbSAvi Kivity }
2210ac1970fbSAvi Kivity 
2211a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2212a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2213ac1970fbSAvi Kivity {
2214ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2215ac1970fbSAvi Kivity }
2216ac1970fbSAvi Kivity 
22178df1cd07Sbellard /* warning: addr must be aligned */
2218a8170e5eSAvi Kivity static inline uint32_t ldl_phys_internal(hwaddr addr,
22191e78bcc1SAlexander Graf                                          enum device_endian endian)
22208df1cd07Sbellard {
22218df1cd07Sbellard     uint8_t *ptr;
2222791af8c8SPaolo Bonzini     uint64_t val;
22235c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2224149f54b5SPaolo Bonzini     hwaddr l = 4;
2225149f54b5SPaolo Bonzini     hwaddr addr1;
22268df1cd07Sbellard 
22275c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2228149f54b5SPaolo Bonzini                                  false);
22295c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
22308df1cd07Sbellard         /* I/O case */
22315c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 4);
22321e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
22331e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
22341e78bcc1SAlexander Graf             val = bswap32(val);
22351e78bcc1SAlexander Graf         }
22361e78bcc1SAlexander Graf #else
22371e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
22381e78bcc1SAlexander Graf             val = bswap32(val);
22391e78bcc1SAlexander Graf         }
22401e78bcc1SAlexander Graf #endif
22418df1cd07Sbellard     } else {
22428df1cd07Sbellard         /* RAM case */
22435c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
224406ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2245149f54b5SPaolo Bonzini                                + addr1);
22461e78bcc1SAlexander Graf         switch (endian) {
22471e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
22481e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
22491e78bcc1SAlexander Graf             break;
22501e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
22511e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
22521e78bcc1SAlexander Graf             break;
22531e78bcc1SAlexander Graf         default:
22548df1cd07Sbellard             val = ldl_p(ptr);
22551e78bcc1SAlexander Graf             break;
22561e78bcc1SAlexander Graf         }
22578df1cd07Sbellard     }
22588df1cd07Sbellard     return val;
22598df1cd07Sbellard }
22608df1cd07Sbellard 
2261a8170e5eSAvi Kivity uint32_t ldl_phys(hwaddr addr)
22621e78bcc1SAlexander Graf {
22631e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
22641e78bcc1SAlexander Graf }
22651e78bcc1SAlexander Graf 
2266a8170e5eSAvi Kivity uint32_t ldl_le_phys(hwaddr addr)
22671e78bcc1SAlexander Graf {
22681e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
22691e78bcc1SAlexander Graf }
22701e78bcc1SAlexander Graf 
2271a8170e5eSAvi Kivity uint32_t ldl_be_phys(hwaddr addr)
22721e78bcc1SAlexander Graf {
22731e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
22741e78bcc1SAlexander Graf }
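
/*
 * Illustrative sketch (guarded out of the build): choosing between the
 * three ldl variants.  The _le/_be forms read a value stored little- or
 * big-endian in guest memory regardless of the target's endianness; the
 * plain form uses the target's native order.  The layout is invented.
 */
#if 0
static void example_read_fields(hwaddr base)
{
    uint32_t pci_reg = ldl_le_phys(base);      /* PCI config is LE by spec */
    uint32_t of_prop = ldl_be_phys(base + 4);  /* OpenFirmware data is BE */
    uint32_t native  = ldl_phys(base + 8);     /* target-order data */

    (void)pci_reg; (void)of_prop; (void)native;
}
#endif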
22751e78bcc1SAlexander Graf 
227684b7b8e7Sbellard /* warning: addr must be aligned */
2277a8170e5eSAvi Kivity static inline uint64_t ldq_phys_internal(hwaddr addr,
22781e78bcc1SAlexander Graf                                          enum device_endian endian)
227984b7b8e7Sbellard {
228084b7b8e7Sbellard     uint8_t *ptr;
228184b7b8e7Sbellard     uint64_t val;
22825c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2283149f54b5SPaolo Bonzini     hwaddr l = 8;
2284149f54b5SPaolo Bonzini     hwaddr addr1;
228584b7b8e7Sbellard 
22865c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2287149f54b5SPaolo Bonzini                                  false);
22885c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
228984b7b8e7Sbellard         /* I/O case */
22905c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 8);
2291968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
2292968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
2293968a5627SPaolo Bonzini             val = bswap64(val);
2294968a5627SPaolo Bonzini         }
2295968a5627SPaolo Bonzini #else
2296968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
2297968a5627SPaolo Bonzini             val = bswap64(val);
2298968a5627SPaolo Bonzini         }
2299968a5627SPaolo Bonzini #endif
230084b7b8e7Sbellard     } else {
230184b7b8e7Sbellard         /* RAM case */
23025c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
230306ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2304149f54b5SPaolo Bonzini                                + addr1);
23051e78bcc1SAlexander Graf         switch (endian) {
23061e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
23071e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
23081e78bcc1SAlexander Graf             break;
23091e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
23101e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
23111e78bcc1SAlexander Graf             break;
23121e78bcc1SAlexander Graf         default:
231384b7b8e7Sbellard             val = ldq_p(ptr);
23141e78bcc1SAlexander Graf             break;
23151e78bcc1SAlexander Graf         }
231684b7b8e7Sbellard     }
231784b7b8e7Sbellard     return val;
231884b7b8e7Sbellard }
231984b7b8e7Sbellard 
2320a8170e5eSAvi Kivity uint64_t ldq_phys(hwaddr addr)
23211e78bcc1SAlexander Graf {
23221e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
23231e78bcc1SAlexander Graf }
23241e78bcc1SAlexander Graf 
2325a8170e5eSAvi Kivity uint64_t ldq_le_phys(hwaddr addr)
23261e78bcc1SAlexander Graf {
23271e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
23281e78bcc1SAlexander Graf }
23291e78bcc1SAlexander Graf 
2330a8170e5eSAvi Kivity uint64_t ldq_be_phys(hwaddr addr)
23311e78bcc1SAlexander Graf {
23321e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
23331e78bcc1SAlexander Graf }
23341e78bcc1SAlexander Graf 
2335aab33094Sbellard /* XXX: optimize */
2336a8170e5eSAvi Kivity uint32_t ldub_phys(hwaddr addr)
2337aab33094Sbellard {
2338aab33094Sbellard     uint8_t val;
2339aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2340aab33094Sbellard     return val;
2341aab33094Sbellard }
2342aab33094Sbellard 
2343733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2344a8170e5eSAvi Kivity static inline uint32_t lduw_phys_internal(hwaddr addr,
23451e78bcc1SAlexander Graf                                           enum device_endian endian)
2346aab33094Sbellard {
2347733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2348733f0b02SMichael S. Tsirkin     uint64_t val;
23495c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2350149f54b5SPaolo Bonzini     hwaddr l = 2;
2351149f54b5SPaolo Bonzini     hwaddr addr1;
2352733f0b02SMichael S. Tsirkin 
23535c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2354149f54b5SPaolo Bonzini                                  false);
23555c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2356733f0b02SMichael S. Tsirkin         /* I/O case */
23575c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
23581e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
23591e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
23601e78bcc1SAlexander Graf             val = bswap16(val);
23611e78bcc1SAlexander Graf         }
23621e78bcc1SAlexander Graf #else
23631e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
23641e78bcc1SAlexander Graf             val = bswap16(val);
23651e78bcc1SAlexander Graf         }
23661e78bcc1SAlexander Graf #endif
2367733f0b02SMichael S. Tsirkin     } else {
2368733f0b02SMichael S. Tsirkin         /* RAM case */
23695c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
237006ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2371149f54b5SPaolo Bonzini                                + addr1);
23721e78bcc1SAlexander Graf         switch (endian) {
23731e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
23741e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
23751e78bcc1SAlexander Graf             break;
23761e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
23771e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
23781e78bcc1SAlexander Graf             break;
23791e78bcc1SAlexander Graf         default:
2380733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
23811e78bcc1SAlexander Graf             break;
23821e78bcc1SAlexander Graf         }
2383733f0b02SMichael S. Tsirkin     }
2384733f0b02SMichael S. Tsirkin     return val;
2385aab33094Sbellard }
2386aab33094Sbellard 
2387a8170e5eSAvi Kivity uint32_t lduw_phys(hwaddr addr)
23881e78bcc1SAlexander Graf {
23891e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
23901e78bcc1SAlexander Graf }
23911e78bcc1SAlexander Graf 
2392a8170e5eSAvi Kivity uint32_t lduw_le_phys(hwaddr addr)
23931e78bcc1SAlexander Graf {
23941e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
23951e78bcc1SAlexander Graf }
23961e78bcc1SAlexander Graf 
2397a8170e5eSAvi Kivity uint32_t lduw_be_phys(hwaddr addr)
23981e78bcc1SAlexander Graf {
23991e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
24001e78bcc1SAlexander Graf }
24011e78bcc1SAlexander Graf 
24028df1cd07Sbellard /* warning: addr must be aligned. The RAM page is not marked as dirty
24038df1cd07Sbellard    and the code inside is not invalidated. This is useful when the dirty
24048df1cd07Sbellard    bits are used to track modified PTEs. */
2405a8170e5eSAvi Kivity void stl_phys_notdirty(hwaddr addr, uint32_t val)
24068df1cd07Sbellard {
24078df1cd07Sbellard     uint8_t *ptr;
24085c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2409149f54b5SPaolo Bonzini     hwaddr l = 4;
2410149f54b5SPaolo Bonzini     hwaddr addr1;
24118df1cd07Sbellard 
24125c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2413149f54b5SPaolo Bonzini                                  true);
24145c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
24155c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
24168df1cd07Sbellard     } else {
24175c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
24185579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
24198df1cd07Sbellard         stl_p(ptr, val);
242074576198Saliguori 
242174576198Saliguori         if (unlikely(in_migration)) {
242274576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
242374576198Saliguori                 /* invalidate code */
242474576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
242574576198Saliguori                 /* set dirty bit */
2426f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
2427f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
242874576198Saliguori             }
242974576198Saliguori         }
24308df1cd07Sbellard     }
24318df1cd07Sbellard }
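
/*
 * Illustrative sketch (guarded out of the build): the intended user of
 * stl_phys_notdirty() is a softmmu helper updating guest page table
 * entries.  Plain stl_phys() would mark the page dirty and invalidate any
 * translated code on it; the _notdirty variant skips that (modulo the
 * in_migration case handled above).
 */
#if 0
static void example_set_pte_dirty(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x40;                   /* e.g. the x86 PTE Dirty bit (bit 6) */
    stl_phys_notdirty(pte_addr, pte);
}
#endif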
24328df1cd07Sbellard 
24338df1cd07Sbellard /* warning: addr must be aligned */
2434a8170e5eSAvi Kivity static inline void stl_phys_internal(hwaddr addr, uint32_t val,
24351e78bcc1SAlexander Graf                                      enum device_endian endian)
24368df1cd07Sbellard {
24378df1cd07Sbellard     uint8_t *ptr;
24385c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2439149f54b5SPaolo Bonzini     hwaddr l = 4;
2440149f54b5SPaolo Bonzini     hwaddr addr1;
24418df1cd07Sbellard 
24425c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2443149f54b5SPaolo Bonzini                                  true);
24445c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
24451e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
24461e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
24471e78bcc1SAlexander Graf             val = bswap32(val);
24481e78bcc1SAlexander Graf         }
24491e78bcc1SAlexander Graf #else
24501e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
24511e78bcc1SAlexander Graf             val = bswap32(val);
24521e78bcc1SAlexander Graf         }
24531e78bcc1SAlexander Graf #endif
24545c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
24558df1cd07Sbellard     } else {
24568df1cd07Sbellard         /* RAM case */
24575c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
24585579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
24591e78bcc1SAlexander Graf         switch (endian) {
24601e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24611e78bcc1SAlexander Graf             stl_le_p(ptr, val);
24621e78bcc1SAlexander Graf             break;
24631e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24641e78bcc1SAlexander Graf             stl_be_p(ptr, val);
24651e78bcc1SAlexander Graf             break;
24661e78bcc1SAlexander Graf         default:
24678df1cd07Sbellard             stl_p(ptr, val);
24681e78bcc1SAlexander Graf             break;
24691e78bcc1SAlexander Graf         }
247051d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
24718df1cd07Sbellard     }
24723a7d929eSbellard }
24738df1cd07Sbellard 
2474a8170e5eSAvi Kivity void stl_phys(hwaddr addr, uint32_t val)
24751e78bcc1SAlexander Graf {
24761e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
24771e78bcc1SAlexander Graf }
24781e78bcc1SAlexander Graf 
2479a8170e5eSAvi Kivity void stl_le_phys(hwaddr addr, uint32_t val)
24801e78bcc1SAlexander Graf {
24811e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
24821e78bcc1SAlexander Graf }
24831e78bcc1SAlexander Graf 
2484a8170e5eSAvi Kivity void stl_be_phys(hwaddr addr, uint32_t val)
24851e78bcc1SAlexander Graf {
24861e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
24871e78bcc1SAlexander Graf }
24881e78bcc1SAlexander Graf 
2489aab33094Sbellard /* XXX: optimize */
2490a8170e5eSAvi Kivity void stb_phys(hwaddr addr, uint32_t val)
2491aab33094Sbellard {
2492aab33094Sbellard     uint8_t v = val;
2493aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2494aab33094Sbellard }
2495aab33094Sbellard 
2496733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2497a8170e5eSAvi Kivity static inline void stw_phys_internal(hwaddr addr, uint32_t val,
24981e78bcc1SAlexander Graf                                      enum device_endian endian)
2499aab33094Sbellard {
2500733f0b02SMichael S. Tsirkin     uint8_t *ptr;
25015c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2502149f54b5SPaolo Bonzini     hwaddr l = 2;
2503149f54b5SPaolo Bonzini     hwaddr addr1;
2504733f0b02SMichael S. Tsirkin 
25055c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2506149f54b5SPaolo Bonzini                                  true);
25075c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
25081e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
25091e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
25101e78bcc1SAlexander Graf             val = bswap16(val);
25111e78bcc1SAlexander Graf         }
25121e78bcc1SAlexander Graf #else
25131e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
25141e78bcc1SAlexander Graf             val = bswap16(val);
25151e78bcc1SAlexander Graf         }
25161e78bcc1SAlexander Graf #endif
25175c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2518733f0b02SMichael S. Tsirkin     } else {
2519733f0b02SMichael S. Tsirkin         /* RAM case */
25205c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2521733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
25221e78bcc1SAlexander Graf         switch (endian) {
25231e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25241e78bcc1SAlexander Graf             stw_le_p(ptr, val);
25251e78bcc1SAlexander Graf             break;
25261e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25271e78bcc1SAlexander Graf             stw_be_p(ptr, val);
25281e78bcc1SAlexander Graf             break;
25291e78bcc1SAlexander Graf         default:
2530733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
25311e78bcc1SAlexander Graf             break;
25321e78bcc1SAlexander Graf         }
253351d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2534733f0b02SMichael S. Tsirkin     }
2535aab33094Sbellard }
2536aab33094Sbellard 
2537a8170e5eSAvi Kivity void stw_phys(hwaddr addr, uint32_t val)
25381e78bcc1SAlexander Graf {
25391e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
25401e78bcc1SAlexander Graf }
25411e78bcc1SAlexander Graf 
2542a8170e5eSAvi Kivity void stw_le_phys(hwaddr addr, uint32_t val)
25431e78bcc1SAlexander Graf {
25441e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
25451e78bcc1SAlexander Graf }
25461e78bcc1SAlexander Graf 
2547a8170e5eSAvi Kivity void stw_be_phys(hwaddr addr, uint32_t val)
25481e78bcc1SAlexander Graf {
25491e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
25501e78bcc1SAlexander Graf }
25511e78bcc1SAlexander Graf 
2552aab33094Sbellard /* XXX: optimize */
2553a8170e5eSAvi Kivity void stq_phys(hwaddr addr, uint64_t val)
2554aab33094Sbellard {
2555aab33094Sbellard     val = tswap64(val);
255671d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
2557aab33094Sbellard }
2558aab33094Sbellard 
2559a8170e5eSAvi Kivity void stq_le_phys(hwaddr addr, uint64_t val)
25601e78bcc1SAlexander Graf {
25611e78bcc1SAlexander Graf     val = cpu_to_le64(val);
25621e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
25631e78bcc1SAlexander Graf }
25641e78bcc1SAlexander Graf 
2565a8170e5eSAvi Kivity void stq_be_phys(hwaddr addr, uint64_t val)
25661e78bcc1SAlexander Graf {
25671e78bcc1SAlexander Graf     val = cpu_to_be64(val);
25681e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
25691e78bcc1SAlexander Graf }
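
/*
 * Illustrative sketch (guarded out of the build): pairing the fixed-order
 * store helpers, as a device model might when publishing a completion
 * record whose (invented) layout is little-endian by specification.
 */
#if 0
static void example_publish_completion(hwaddr rec, uint64_t cookie)
{
    stq_le_phys(rec, cookie);         /* 64-bit LE cookie */
    stw_le_phys(rec + 8, 0x0001);     /* 16-bit LE "done" status */
}
#endif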
25701e78bcc1SAlexander Graf 
25715e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
25729349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
2573b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
257413eb76e0Sbellard {
257513eb76e0Sbellard     int l;
2576a8170e5eSAvi Kivity     hwaddr phys_addr;
25779b3c35e0Sj_mayer     target_ulong page;
257813eb76e0Sbellard 
257913eb76e0Sbellard     while (len > 0) {
258013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
258113eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
258213eb76e0Sbellard         /* if no physical page mapped, return an error */
258313eb76e0Sbellard         if (phys_addr == -1) {
258413eb76e0Sbellard             return -1;
258413eb76e0Sbellard         }
258513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
258613eb76e0Sbellard         if (l > len)
258713eb76e0Sbellard             l = len;
25885e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
25895e2972fdSaliguori         if (is_write) {
25905e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
25915e2972fdSaliguori         } else {
25925e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
25925e2972fdSaliguori         }
259313eb76e0Sbellard         len -= l;
259413eb76e0Sbellard         buf += l;
259513eb76e0Sbellard         addr += l;
259613eb76e0Sbellard     }
259713eb76e0Sbellard     return 0;
259813eb76e0Sbellard }
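
/*
 * Illustrative sketch (guarded out of the build): cpu_memory_rw_debug()
 * as a gdb-style debugger would use it, with guest-virtual addresses and
 * ROM-patching writes.  The breakpoint byte is x86 int3; the function
 * name is hypothetical.
 */
#if 0
static int example_plant_breakpoint(CPUArchState *env, target_ulong pc,
                                    uint8_t *saved_insn)
{
    uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, pc, saved_insn, 1, 0) < 0) {
        return -1;                    /* page not mapped */
    }
    return cpu_memory_rw_debug(env, pc, &int3, 1, 1);
}
#endif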
2599a68fe89cSPaul Brook #endif
260013eb76e0Sbellard 
26018e4a424bSBlue Swirl #if !defined(CONFIG_USER_ONLY)
26028e4a424bSBlue Swirl 
26038e4a424bSBlue Swirl /*
26048e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find out if
26058e4a424bSBlue Swirl  * it's running on a big endian machine. Don't do this at home kids!
26068e4a424bSBlue Swirl  */
26078e4a424bSBlue Swirl bool virtio_is_big_endian(void);
26088e4a424bSBlue Swirl bool virtio_is_big_endian(void)
26098e4a424bSBlue Swirl {
26108e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
26118e4a424bSBlue Swirl     return true;
26128e4a424bSBlue Swirl #else
26138e4a424bSBlue Swirl     return false;
26148e4a424bSBlue Swirl #endif
26158e4a424bSBlue Swirl }
26168e4a424bSBlue Swirl 
26178e4a424bSBlue Swirl #endif
26188e4a424bSBlue Swirl 
261976f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2620a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
262176f35538SWen Congyang {
26225c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2623149f54b5SPaolo Bonzini     hwaddr l = 1;
262476f35538SWen Congyang 
26255c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2626149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
262776f35538SWen Congyang 
26285c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
26295c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
263076f35538SWen Congyang }
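
/*
 * Illustrative sketch (guarded out of the build): a memory dumper using
 * cpu_physical_memory_is_io() to avoid read side effects on device
 * registers.  example_dump_page() is hypothetical.
 */
#if 0
static void example_dump_page(hwaddr addr, uint8_t *buf)
{
    if (cpu_physical_memory_is_io(addr)) {
        memset(buf, 0, TARGET_PAGE_SIZE);    /* skip MMIO contents */
        return;
    }
    cpu_physical_memory_read(addr, buf, TARGET_PAGE_SIZE);
}
#endif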
2631bd2fa51fSMichael R. Hines 
2632bd2fa51fSMichael R. Hines void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2633bd2fa51fSMichael R. Hines {
2634bd2fa51fSMichael R. Hines     RAMBlock *block;
2635bd2fa51fSMichael R. Hines 
2636bd2fa51fSMichael R. Hines     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2637bd2fa51fSMichael R. Hines         func(block->host, block->offset, block->length, opaque);
2638bd2fa51fSMichael R. Hines     }
2639bd2fa51fSMichael R. Hines }
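
/*
 * Illustrative sketch (guarded out of the build): a RAMBlockIterFunc
 * matching the call above (host pointer, offset, length, opaque), in the
 * style of the RDMA migration code that registers every RAM block.  The
 * accounting callback is invented.
 */
#if 0
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    (void)host_addr; (void)offset;
    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}
#endif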
2640ec3f8c99SPeter Maydell #endif
2641