xref: /qemu/system/physmem.c (revision ace694cccccf343852d9f0b34171ad475e248bbf)
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
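/* Both fields pack into one 32-bit word: skip == 0 marks a leaf whose
 * ptr indexes the sections table, while a non-zero skip makes ptr an
 * index into the nodes table and tells a lookup how many levels it may
 * drop in one step.  26 bits of ptr leave the all-ones value free for
 * PHYS_MAP_NODE_NIL below.
 */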

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
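/* Worked example: with the common TARGET_PAGE_BITS of 12 this is
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. at most six levels of 512-entry
 * nodes cover the whole 64-bit address space.
 */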

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
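/* A subpage stores one 16-bit section index per byte of its page, so
 * splitting a page costs sizeof(uint16_t) * TARGET_PAGE_SIZE of table
 * (e.g. 8 KiB for 4 KiB pages); subpages are only created where a
 * MemoryRegionSection boundary is not page-aligned.
 */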

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
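/* Dispatch setup is expected to register these four sections first and
 * in this order, so the constants can be baked into TLB entries (see
 * memory_region_section_get_iotlb() below).
 */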

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

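/* Worked example, assuming 4 KiB pages: phys_page_set(d, 512, 512, leaf)
 * (mapping 2 MiB at guest-physical 2 MiB) is aligned to a whole level-1
 * step, so phys_page_set_level() stores a single leaf entry at level 1
 * instead of filling 512 level-0 slots.
 */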
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
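/* E.g. a chain of three single-child nodes collapses as the recursion
 * below unwinds: each parent takes over its child's ptr and adds the
 * skips, so a later lookup crosses the whole chain in one lp.skip-sized
 * step.
 */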
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

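/* The walk below starts at i == P_L2_LEVELS and subtracts lp.skip each
 * round, consuming P_L2_BITS of the page index per visited node; after
 * compaction a lookup can land on a leaf that does not actually cover
 * addr, which the final range check catches by returning the
 * unassigned section.
 */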
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

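/* On return, *plen has been clamped so that [*xlat, *xlat + *plen) lies
 * entirely inside section->mr; callers needing more must translate the
 * remainder in a further call.
 */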
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

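/* Each IOMMU hop below rewrites addr through the returned IOTLB entry,
 * shrinks len to the mask-aligned window, and restarts the lookup in
 * iotlb.target_as; a permission miss ends the chain at
 * io_mem_unassigned.
 */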
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
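/* len_mask sketch: a 4-byte watchpoint gives len_mask == ~3, so the
 * sanity check rejects non-power-of-two lengths via (len & (len - 1))
 * and unaligned addresses via (addr & ~len_mask).
 */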
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
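/* Look up the RAMBlock containing addr, trying the most-recently-used
 * block first; with only a handful of large blocks this one-entry
 * cache makes the common lookup O(1).
 */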
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     unsigned client)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

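/* The returned iotlb value is overloaded: for RAM it is the page's
 * ram_addr with a PHYS_SECTION_* flag ORed into the low bits, while for
 * MMIO it is the section index within the dispatch map plus the in-page
 * offset; the TLB slow path decodes it accordingly.
 */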
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

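/* mem_add() splits each section into at most three pieces: an unaligned
 * head routed through a subpage, a run of whole pages registered
 * directly, and an unaligned tail that becomes a subpage again.
 */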
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

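/* On hugetlbfs, statfs() reports the huge page size in f_bsize; the
 * magic-number check below only warns, since other filesystems still
 * work, just without the huge-page benefit.
 */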
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

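        /*
         * Touch one byte of every huge page so the kernel allocates
         * them up front; a SIGBUS here (e.g. hugetlbfs pool exhausted)
         * reaches sigbus_handler() and longjmps to the sigsetjmp check
         * above.
         */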
        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < memory/hpagesize; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

1103d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1104d17b5288SAlex Williamson {
110504b16653SAlex Williamson     RAMBlock *block, *next_block;
11063e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
110704b16653SAlex Williamson 
110849cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out the same offset multiple times */
110949cd9ac6SStefan Hajnoczi 
1110a3161038SPaolo Bonzini     if (QTAILQ_EMPTY(&ram_list.blocks))
111104b16653SAlex Williamson         return 0;
111204b16653SAlex Williamson 
1113a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1114f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
111504b16653SAlex Williamson 
111604b16653SAlex Williamson         end = block->offset + block->length;
111704b16653SAlex Williamson 
1118a3161038SPaolo Bonzini         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
111904b16653SAlex Williamson             if (next_block->offset >= end) {
112004b16653SAlex Williamson                 next = MIN(next, next_block->offset);
112104b16653SAlex Williamson             }
112204b16653SAlex Williamson         }
112304b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
112404b16653SAlex Williamson             offset = end;
112504b16653SAlex Williamson             mingap = next - end;
112604b16653SAlex Williamson         }
112704b16653SAlex Williamson     }
11283e837b2cSAlex Williamson 
11293e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
11303e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
11313e837b2cSAlex Williamson                 (uint64_t)size);
11323e837b2cSAlex Williamson         abort();
11333e837b2cSAlex Williamson     }
11343e837b2cSAlex Williamson 
113504b16653SAlex Williamson     return offset;
113604b16653SAlex Williamson }
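
/*
 * Worked example (illustrative): with existing blocks covering [0, 4M) and
 * [8M, 12M), a 2M request sees two candidate gaps, [4M, 8M) and
 * [12M, RAM_ADDR_MAX).  The loop above keeps the smallest gap that still
 * fits (best-fit), so the new block lands at offset 4M.
 */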
113704b16653SAlex Williamson 
1138652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
113904b16653SAlex Williamson {
1140d17b5288SAlex Williamson     RAMBlock *block;
1141d17b5288SAlex Williamson     ram_addr_t last = 0;
1142d17b5288SAlex Williamson 
1143a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1144d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
1145d17b5288SAlex Williamson 
1146d17b5288SAlex Williamson     return last;
1147d17b5288SAlex Williamson }
1148d17b5288SAlex Williamson 
1149ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1150ddb97f1dSJason Baron {
1151ddb97f1dSJason Baron     int ret;
1152ddb97f1dSJason Baron 
1153ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core dump */
11542ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
11552ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1156ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1157ddb97f1dSJason Baron         if (ret) {
1158ddb97f1dSJason Baron             perror("qemu_madvise");
1159ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1160ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1161ddb97f1dSJason Baron         }
1162ddb97f1dSJason Baron     }
1163ddb97f1dSJason Baron }
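
/*
 * Example (illustrative): launching QEMU with "-machine dump-guest-core=off"
 * makes the check above apply QEMU_MADV_DONTDUMP, so guest RAM is excluded
 * from any host core dump.
 */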
1164ddb97f1dSJason Baron 
1165c5705a77SAvi Kivity void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
116684b89d78SCam Macdonell {
116784b89d78SCam Macdonell     RAMBlock *new_block, *block;
116884b89d78SCam Macdonell 
1169c5705a77SAvi Kivity     new_block = NULL;
1170a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1171c5705a77SAvi Kivity         if (block->offset == addr) {
1172c5705a77SAvi Kivity             new_block = block;
1173c5705a77SAvi Kivity             break;
1174c5705a77SAvi Kivity         }
1175c5705a77SAvi Kivity     }
1176c5705a77SAvi Kivity     assert(new_block);
1177c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
117884b89d78SCam Macdonell 
117909e5ab63SAnthony Liguori     if (dev) {
118009e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
118184b89d78SCam Macdonell         if (id) {
118284b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
11837267c094SAnthony Liguori             g_free(id);
118484b89d78SCam Macdonell         }
118584b89d78SCam Macdonell     }
118684b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
118784b89d78SCam Macdonell 
1188b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1189b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1190a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1191c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
119284b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
119384b89d78SCam Macdonell                     new_block->idstr);
119484b89d78SCam Macdonell             abort();
119584b89d78SCam Macdonell         }
119684b89d78SCam Macdonell     }
1197b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1198c5705a77SAvi Kivity }
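
/*
 * Illustrative result: with no device, the idstr is just the name, e.g.
 * "pc.ram"; with a device whose qdev path resolves (path hypothetical), it
 * becomes "<dev path>/<name>".
 */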
1199c5705a77SAvi Kivity 
12008490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
12018490fc78SLuiz Capitulino {
12022ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
12038490fc78SLuiz Capitulino         /* disabled by the user */
12048490fc78SLuiz Capitulino         return 0;
12058490fc78SLuiz Capitulino     }
12068490fc78SLuiz Capitulino 
12078490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
12088490fc78SLuiz Capitulino }
12098490fc78SLuiz Capitulino 
1210c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1211c5705a77SAvi Kivity                                    MemoryRegion *mr)
1212c5705a77SAvi Kivity {
1213abb26d63SPaolo Bonzini     RAMBlock *block, *new_block;
12142152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
12152152f5caSJuan Quintela 
12162152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1217c5705a77SAvi Kivity 
1218c5705a77SAvi Kivity     size = TARGET_PAGE_ALIGN(size);
1219c5705a77SAvi Kivity     new_block = g_malloc0(sizeof(*new_block));
12203435f395SMarkus Armbruster     new_block->fd = -1;
122184b89d78SCam Macdonell 
1222b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1223b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
12247c637366SAvi Kivity     new_block->mr = mr;
1225432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
12266977dfe6SYoshiaki Tamura     if (host) {
122784b89d78SCam Macdonell         new_block->host = host;
1228cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
1229dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
1230dfeaf2abSMarkus Armbruster         if (mem_path) {
1231dfeaf2abSMarkus Armbruster             fprintf(stderr, "-mem-path not supported with Xen\n");
1232dfeaf2abSMarkus Armbruster             exit(1);
1233dfeaf2abSMarkus Armbruster         }
1234dfeaf2abSMarkus Armbruster         xen_ram_alloc(new_block->offset, size, mr);
12356977dfe6SYoshiaki Tamura     } else {
1236c902760fSMarcelo Tosatti         if (mem_path) {
1237e1e84ba0SMarkus Armbruster             if (phys_mem_alloc != qemu_anon_ram_alloc) {
1238e1e84ba0SMarkus Armbruster                 /*
1239e1e84ba0SMarkus Armbruster                  * file_ram_alloc() needs to allocate just like
1240e1e84ba0SMarkus Armbruster                  * phys_mem_alloc, but we haven't bothered to provide
1241e1e84ba0SMarkus Armbruster                  * a hook there.
1242e1e84ba0SMarkus Armbruster                  */
1243e1e84ba0SMarkus Armbruster                 fprintf(stderr,
1244e1e84ba0SMarkus Armbruster                         "-mem-path not supported with this accelerator\n");
1245c902760fSMarcelo Tosatti                 exit(1);
1246e1e84ba0SMarkus Armbruster             }
1247e1e84ba0SMarkus Armbruster             new_block->host = file_ram_alloc(new_block, size, mem_path);
12480628c182SMarkus Armbruster         }
12490628c182SMarkus Armbruster         if (!new_block->host) {
125091138037SMarkus Armbruster             new_block->host = phys_mem_alloc(size);
125139228250SMarkus Armbruster             if (!new_block->host) {
125239228250SMarkus Armbruster                 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
125339228250SMarkus Armbruster                         new_block->mr->name, strerror(errno));
125439228250SMarkus Armbruster                 exit(1);
125539228250SMarkus Armbruster             }
12568490fc78SLuiz Capitulino             memory_try_enable_merging(new_block->host, size);
1257c902760fSMarcelo Tosatti         }
12586977dfe6SYoshiaki Tamura     }
125994a6b54fSpbrook     new_block->length = size;
126094a6b54fSpbrook 
1261abb26d63SPaolo Bonzini     /* Keep the list sorted from biggest to smallest block.  */
1262abb26d63SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1263abb26d63SPaolo Bonzini         if (block->length < new_block->length) {
1264abb26d63SPaolo Bonzini             break;
1265abb26d63SPaolo Bonzini         }
1266abb26d63SPaolo Bonzini     }
1267abb26d63SPaolo Bonzini     if (block) {
1268abb26d63SPaolo Bonzini         QTAILQ_INSERT_BEFORE(block, new_block, next);
1269abb26d63SPaolo Bonzini     } else {
1270abb26d63SPaolo Bonzini         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1271abb26d63SPaolo Bonzini     }
12720d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
127394a6b54fSpbrook 
1274f798b07fSUmesh Deshpande     ram_list.version++;
1275b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1276f798b07fSUmesh Deshpande 
12772152f5caSJuan Quintela     new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
12782152f5caSJuan Quintela 
12792152f5caSJuan Quintela     if (new_ram_size > old_ram_size) {
12801ab4c8ceSJuan Quintela         int i;
12811ab4c8ceSJuan Quintela         for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
12821ab4c8ceSJuan Quintela             ram_list.dirty_memory[i] =
12831ab4c8ceSJuan Quintela                 bitmap_zero_extend(ram_list.dirty_memory[i],
12841ab4c8ceSJuan Quintela                                    old_ram_size, new_ram_size);
12851ab4c8ceSJuan Quintela         }
12862152f5caSJuan Quintela     }
128775218e7fSJuan Quintela     cpu_physical_memory_set_dirty_range(new_block->offset, size);
128894a6b54fSpbrook 
1289ddb97f1dSJason Baron     qemu_ram_setup_dump(new_block->host, size);
1290ad0b5321SLuiz Capitulino     qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
12913e469dbfSAndrea Arcangeli     qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1292ddb97f1dSJason Baron 
12936f0437e8SJan Kiszka     if (kvm_enabled())
12946f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
12956f0437e8SJan Kiszka 
129694a6b54fSpbrook     return new_block->offset;
129794a6b54fSpbrook }
1298e9a1ab19Sbellard 
1299c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
13006977dfe6SYoshiaki Tamura {
1301c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
13026977dfe6SYoshiaki Tamura }
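
/*
 * Usage sketch (illustrative, not part of the build; names hypothetical):
 * board code normally reaches qemu_ram_alloc() through the memory API
 * rather than calling it directly.
 */
#if 0
static void example_init_ram(MemoryRegion *sysmem)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* memory_region_init_ram() allocates through qemu_ram_alloc() */
    memory_region_init_ram(ram, NULL, "example.ram", 64 * 1024 * 1024);
    memory_region_add_subregion(sysmem, 0, ram);
}
#endif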
13036977dfe6SYoshiaki Tamura 
13041f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
13051f2e98b6SAlex Williamson {
13061f2e98b6SAlex Williamson     RAMBlock *block;
13071f2e98b6SAlex Williamson 
1308b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1309b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1310a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
13111f2e98b6SAlex Williamson         if (addr == block->offset) {
1312a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13130d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1314f798b07fSUmesh Deshpande             ram_list.version++;
13157267c094SAnthony Liguori             g_free(block);
1316b2a8658eSUmesh Deshpande             break;
13171f2e98b6SAlex Williamson         }
13181f2e98b6SAlex Williamson     }
1319b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
13201f2e98b6SAlex Williamson }
13211f2e98b6SAlex Williamson 
1322c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
1323e9a1ab19Sbellard {
132404b16653SAlex Williamson     RAMBlock *block;
132504b16653SAlex Williamson 
1326b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1327b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1328a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
132904b16653SAlex Williamson         if (addr == block->offset) {
1330a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13310d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1332f798b07fSUmesh Deshpande             ram_list.version++;
1333cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1334cd19cfa2SHuang Ying                 ;
1335dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1336dfeaf2abSMarkus Armbruster                 xen_invalidate_map_cache_entry(block->host);
1337089f3f76SStefan Weil #ifndef _WIN32
13383435f395SMarkus Armbruster             } else if (block->fd >= 0) {
133904b16653SAlex Williamson                 munmap(block->host, block->length);
134004b16653SAlex Williamson                 close(block->fd);
1341089f3f76SStefan Weil #endif
134204b16653SAlex Williamson             } else {
1343e7a09b92SPaolo Bonzini                 qemu_anon_ram_free(block->host, block->length);
134404b16653SAlex Williamson             }
13457267c094SAnthony Liguori             g_free(block);
1346b2a8658eSUmesh Deshpande             break;
134704b16653SAlex Williamson         }
134804b16653SAlex Williamson     }
1349b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
135004b16653SAlex Williamson 
1351e9a1ab19Sbellard }
1352e9a1ab19Sbellard 
1353cd19cfa2SHuang Ying #ifndef _WIN32
1354cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1355cd19cfa2SHuang Ying {
1356cd19cfa2SHuang Ying     RAMBlock *block;
1357cd19cfa2SHuang Ying     ram_addr_t offset;
1358cd19cfa2SHuang Ying     int flags;
1359cd19cfa2SHuang Ying     void *area, *vaddr;
1360cd19cfa2SHuang Ying 
1361a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1362cd19cfa2SHuang Ying         offset = addr - block->offset;
1363cd19cfa2SHuang Ying         if (offset < block->length) {
1364cd19cfa2SHuang Ying             vaddr = block->host + offset;
1365cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1366cd19cfa2SHuang Ying                 ;
1367dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1368dfeaf2abSMarkus Armbruster                 abort();
1369cd19cfa2SHuang Ying             } else {
1370cd19cfa2SHuang Ying                 flags = MAP_FIXED;
1371cd19cfa2SHuang Ying                 munmap(vaddr, length);
13723435f395SMarkus Armbruster                 if (block->fd >= 0) {
1373cd19cfa2SHuang Ying #ifdef MAP_POPULATE
1374cd19cfa2SHuang Ying                     flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1375cd19cfa2SHuang Ying                         MAP_PRIVATE;
1376cd19cfa2SHuang Ying #else
1377cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE;
1378cd19cfa2SHuang Ying #endif
1379cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1380cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1381cd19cfa2SHuang Ying                 } else {
13822eb9fbaaSMarkus Armbruster                     /*
13832eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
13842eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
13852eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
13862eb9fbaaSMarkus Armbruster                      */
13872eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
13882eb9fbaaSMarkus Armbruster 
1389cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1390cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1391cd19cfa2SHuang Ying                                 flags, -1, 0);
1392cd19cfa2SHuang Ying                 }
1393cd19cfa2SHuang Ying                 if (area != vaddr) {
1394f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1395f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1396cd19cfa2SHuang Ying                             length, addr);
1397cd19cfa2SHuang Ying                     exit(1);
1398cd19cfa2SHuang Ying                 }
13998490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1400ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1401cd19cfa2SHuang Ying             }
1402cd19cfa2SHuang Ying             return;
1403cd19cfa2SHuang Ying         }
1404cd19cfa2SHuang Ying     }
1405cd19cfa2SHuang Ying }
1406cd19cfa2SHuang Ying #endif /* !_WIN32 */
1407cd19cfa2SHuang Ying 
14081b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
14091b5ec234SPaolo Bonzini    With the exception of the softmmu code in this file, this should
14101b5ec234SPaolo Bonzini    only be used for local memory (e.g. video ram) that the device owns,
14111b5ec234SPaolo Bonzini    and knows it isn't going to access beyond the end of the block.
14121b5ec234SPaolo Bonzini 
14131b5ec234SPaolo Bonzini    It should not be used for general purpose DMA.
14141b5ec234SPaolo Bonzini    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
14151b5ec234SPaolo Bonzini  */
14161b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
14171b5ec234SPaolo Bonzini {
14181b5ec234SPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
14191b5ec234SPaolo Bonzini 
1420868bb33fSJan Kiszka     if (xen_enabled()) {
1421432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1422432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1423712c2b41SStefano Stabellini          * In that case just map up to the end of the page.
1424432d268cSJun Nakajima          */
1425432d268cSJun Nakajima         if (block->offset == 0) {
1426e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1427432d268cSJun Nakajima         } else if (block->host == NULL) {
1428e41d7c69SJan Kiszka             block->host =
1429e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1430432d268cSJun Nakajima         }
1431432d268cSJun Nakajima     }
1432f471a17eSAlex Williamson     return block->host + (addr - block->offset);
143394a6b54fSpbrook }
1434f471a17eSAlex Williamson 
143538bee5dcSStefano Stabellini /* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
143638bee5dcSStefano Stabellini  * but takes a size argument */
1437cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
143838bee5dcSStefano Stabellini {
14398ab934f9SStefano Stabellini     if (*size == 0) {
14408ab934f9SStefano Stabellini         return NULL;
14418ab934f9SStefano Stabellini     }
1442868bb33fSJan Kiszka     if (xen_enabled()) {
1443e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1444868bb33fSJan Kiszka     } else {
144538bee5dcSStefano Stabellini         RAMBlock *block;
144638bee5dcSStefano Stabellini 
1447a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
144838bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
144938bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
145038bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
145138bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
145238bee5dcSStefano Stabellini             }
145338bee5dcSStefano Stabellini         }
145438bee5dcSStefano Stabellini 
145538bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
145638bee5dcSStefano Stabellini         abort();
145738bee5dcSStefano Stabellini     }
145838bee5dcSStefano Stabellini }
145938bee5dcSStefano Stabellini 
14607443b437SPaolo Bonzini /* Some of the softmmu routines need to translate from a host pointer
14617443b437SPaolo Bonzini    (typically a TLB entry) back to a ram offset.  */
14621b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
14635579c7f3Spbrook {
146494a6b54fSpbrook     RAMBlock *block;
146594a6b54fSpbrook     uint8_t *host = ptr;
146694a6b54fSpbrook 
1467868bb33fSJan Kiszka     if (xen_enabled()) {
1468e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
14691b5ec234SPaolo Bonzini         return qemu_get_ram_block(*ram_addr)->mr;
1470712c2b41SStefano Stabellini     }
1471712c2b41SStefano Stabellini 
147223887b79SPaolo Bonzini     block = ram_list.mru_block;
147323887b79SPaolo Bonzini     if (block && block->host && host - block->host < block->length) {
147423887b79SPaolo Bonzini         goto found;
147523887b79SPaolo Bonzini     }
147623887b79SPaolo Bonzini 
1477a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1478432d268cSJun Nakajima         /* This case appears when the block is not mapped. */
1479432d268cSJun Nakajima         if (block->host == NULL) {
1480432d268cSJun Nakajima             continue;
1481432d268cSJun Nakajima         }
1482f471a17eSAlex Williamson         if (host - block->host < block->length) {
148323887b79SPaolo Bonzini             goto found;
148494a6b54fSpbrook         }
1485f471a17eSAlex Williamson     }
1486432d268cSJun Nakajima 
14871b5ec234SPaolo Bonzini     return NULL;
148823887b79SPaolo Bonzini 
148923887b79SPaolo Bonzini found:
149023887b79SPaolo Bonzini     *ram_addr = block->offset + (host - block->host);
14911b5ec234SPaolo Bonzini     return block->mr;
1492e890261fSMarcelo Tosatti }
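
/*
 * Round-trip sketch (illustrative, not part of the build): a host pointer
 * obtained from qemu_get_ram_ptr() maps back to its ram_addr_t.
 */
#if 0
static void example_round_trip(ram_addr_t addr)  /* addr assumed valid */
{
    ram_addr_t ram_addr;
    void *host = qemu_get_ram_ptr(addr);
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);

    assert(mr != NULL && ram_addr == addr);
}
#endif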
1493f471a17eSAlex Williamson 
1494a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
14950e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
14961ccde1cbSbellard {
149752159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
14980e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
14993a7d929eSbellard     }
15000e0df1e2SAvi Kivity     switch (size) {
15010e0df1e2SAvi Kivity     case 1:
15025579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
15030e0df1e2SAvi Kivity         break;
15040e0df1e2SAvi Kivity     case 2:
15055579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
15060e0df1e2SAvi Kivity         break;
15070e0df1e2SAvi Kivity     case 4:
15085579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
15090e0df1e2SAvi Kivity         break;
15100e0df1e2SAvi Kivity     default:
15110e0df1e2SAvi Kivity         abort();
15120e0df1e2SAvi Kivity     }
151352159192SJuan Quintela     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
151452159192SJuan Quintela     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
1515f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1516f23db169Sbellard        flushed */
151706567942SJuan Quintela     if (cpu_physical_memory_is_dirty(ram_addr)) {
15184917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
15194917cf44SAndreas Färber         tlb_set_dirty(env, env->mem_io_vaddr);
15204917cf44SAndreas Färber     }
15211ccde1cbSbellard }
15221ccde1cbSbellard 
1523b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1524b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1525b018ddf6SPaolo Bonzini {
1526b018ddf6SPaolo Bonzini     return is_write;
1527b018ddf6SPaolo Bonzini }
1528b018ddf6SPaolo Bonzini 
15290e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
15300e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1531b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
15320e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
15331ccde1cbSbellard };
15341ccde1cbSbellard 
15350f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
1536b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
15370f459d16Spbrook {
15384917cf44SAndreas Färber     CPUArchState *env = current_cpu->env_ptr;
153906d55cc1Saliguori     target_ulong pc, cs_base;
15400f459d16Spbrook     target_ulong vaddr;
1541a1d1bb31Saliguori     CPUWatchpoint *wp;
154206d55cc1Saliguori     int cpu_flags;
15430f459d16Spbrook 
154406d55cc1Saliguori     if (env->watchpoint_hit) {
154506d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
154606d55cc1Saliguori          * the debug interrupt so that it will trigger after the
154706d55cc1Saliguori          * current instruction. */
1548c3affe56SAndreas Färber         cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
154906d55cc1Saliguori         return;
155006d55cc1Saliguori     }
15512e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
155272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1553b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
1554b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
15556e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
15566e140f28Saliguori             if (!env->watchpoint_hit) {
1557a1d1bb31Saliguori                 env->watchpoint_hit = wp;
15585a316526SBlue Swirl                 tb_check_watchpoint(env);
155906d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
156006d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
1561488d6577SMax Filippov                     cpu_loop_exit(env);
156206d55cc1Saliguori                 } else {
156306d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
156406d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
156506d55cc1Saliguori                     cpu_resume_from_signal(env, NULL);
15660f459d16Spbrook                 }
1567488d6577SMax Filippov             }
15686e140f28Saliguori         } else {
15696e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
15706e140f28Saliguori         }
15710f459d16Spbrook     }
15720f459d16Spbrook }
15730f459d16Spbrook 
15746658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
15756658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
15766658ffb8Spbrook    phys routines.  */
1577a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
15781ec9b909SAvi Kivity                                unsigned size)
15796658ffb8Spbrook {
15801ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
15811ec9b909SAvi Kivity     switch (size) {
15821ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
15831ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
15841ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
15851ec9b909SAvi Kivity     default: abort();
15861ec9b909SAvi Kivity     }
15876658ffb8Spbrook }
15886658ffb8Spbrook 
1589a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
15901ec9b909SAvi Kivity                             uint64_t val, unsigned size)
15916658ffb8Spbrook {
15921ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
15931ec9b909SAvi Kivity     switch (size) {
159467364150SMax Filippov     case 1:
159567364150SMax Filippov         stb_phys(addr, val);
159667364150SMax Filippov         break;
159767364150SMax Filippov     case 2:
159867364150SMax Filippov         stw_phys(addr, val);
159967364150SMax Filippov         break;
160067364150SMax Filippov     case 4:
160167364150SMax Filippov         stl_phys(addr, val);
160267364150SMax Filippov         break;
16031ec9b909SAvi Kivity     default: abort();
16041ec9b909SAvi Kivity     }
16056658ffb8Spbrook }
16066658ffb8Spbrook 
16071ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
16081ec9b909SAvi Kivity     .read = watch_mem_read,
16091ec9b909SAvi Kivity     .write = watch_mem_write,
16101ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
16116658ffb8Spbrook };
16126658ffb8Spbrook 
1613a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
161470c68e44SAvi Kivity                              unsigned len)
1615db7b5426Sblueswir1 {
1616acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1617acc9d80bSJan Kiszka     uint8_t buf[4];
1618791af8c8SPaolo Bonzini 
1619db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1620016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1621acc9d80bSJan Kiszka            subpage, len, addr);
1622db7b5426Sblueswir1 #endif
1623acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1624acc9d80bSJan Kiszka     switch (len) {
1625acc9d80bSJan Kiszka     case 1:
1626acc9d80bSJan Kiszka         return ldub_p(buf);
1627acc9d80bSJan Kiszka     case 2:
1628acc9d80bSJan Kiszka         return lduw_p(buf);
1629acc9d80bSJan Kiszka     case 4:
1630acc9d80bSJan Kiszka         return ldl_p(buf);
1631acc9d80bSJan Kiszka     default:
1632acc9d80bSJan Kiszka         abort();
1633acc9d80bSJan Kiszka     }
1634db7b5426Sblueswir1 }
1635db7b5426Sblueswir1 
1636a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
163770c68e44SAvi Kivity                           uint64_t value, unsigned len)
1638db7b5426Sblueswir1 {
1639acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1640acc9d80bSJan Kiszka     uint8_t buf[4];
1641acc9d80bSJan Kiszka 
1642db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1643016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1644acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1645acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1646db7b5426Sblueswir1 #endif
1647acc9d80bSJan Kiszka     switch (len) {
1648acc9d80bSJan Kiszka     case 1:
1649acc9d80bSJan Kiszka         stb_p(buf, value);
1650acc9d80bSJan Kiszka         break;
1651acc9d80bSJan Kiszka     case 2:
1652acc9d80bSJan Kiszka         stw_p(buf, value);
1653acc9d80bSJan Kiszka         break;
1654acc9d80bSJan Kiszka     case 4:
1655acc9d80bSJan Kiszka         stl_p(buf, value);
1656acc9d80bSJan Kiszka         break;
1657acc9d80bSJan Kiszka     default:
1658acc9d80bSJan Kiszka         abort();
1659acc9d80bSJan Kiszka     }
1660acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1661db7b5426Sblueswir1 }
1662db7b5426Sblueswir1 
1663c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1664016e9d62SAmos Kong                             unsigned len, bool is_write)
1665c353e4ccSPaolo Bonzini {
1666acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1667c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1668016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1669acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
1670c353e4ccSPaolo Bonzini #endif
1671c353e4ccSPaolo Bonzini 
1672acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1673016e9d62SAmos Kong                                       len, is_write);
1674c353e4ccSPaolo Bonzini }
1675c353e4ccSPaolo Bonzini 
167670c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
167770c68e44SAvi Kivity     .read = subpage_read,
167870c68e44SAvi Kivity     .write = subpage_write,
1679c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
168070c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
1681db7b5426Sblueswir1 };
1682db7b5426Sblueswir1 
1683c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
16845312bd8bSAvi Kivity                              uint16_t section)
1685db7b5426Sblueswir1 {
1686db7b5426Sblueswir1     int idx, eidx;
1687db7b5426Sblueswir1 
1688db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1689db7b5426Sblueswir1         return -1;
1690db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1691db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1692db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1693016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1694016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
1695db7b5426Sblueswir1 #endif
1696db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
16975312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1698db7b5426Sblueswir1     }
1699db7b5426Sblueswir1 
1700db7b5426Sblueswir1     return 0;
1701db7b5426Sblueswir1 }
1702db7b5426Sblueswir1 
1703acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1704db7b5426Sblueswir1 {
1705c227f099SAnthony Liguori     subpage_t *mmio;
1706db7b5426Sblueswir1 
17077267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
17081eec614bSaliguori 
1709acc9d80bSJan Kiszka     mmio->as = as;
1710db7b5426Sblueswir1     mmio->base = base;
17112c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
171270c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
1713b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1714db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1715016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1716016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
1717db7b5426Sblueswir1 #endif
1718b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1719db7b5426Sblueswir1 
1720db7b5426Sblueswir1     return mmio;
1721db7b5426Sblueswir1 }
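
/*
 * Illustrative example: a 16-byte device at offset 0x100 of an otherwise
 * unassigned page would be hooked up with
 * subpage_register(mmio, 0x100, 0x10f, section), leaving [0, 0xff] and
 * [0x110, TARGET_PAGE_SIZE - 1] pointing at PHYS_SECTION_UNASSIGNED.
 */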
1722db7b5426Sblueswir1 
172353cb28cbSMarcel Apfelbaum static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
17245312bd8bSAvi Kivity {
17255312bd8bSAvi Kivity     MemoryRegionSection section = {
17265312bd8bSAvi Kivity         .mr = mr,
17275312bd8bSAvi Kivity         .offset_within_address_space = 0,
17285312bd8bSAvi Kivity         .offset_within_region = 0,
1729052e87b0SPaolo Bonzini         .size = int128_2_64(),
17305312bd8bSAvi Kivity     };
17315312bd8bSAvi Kivity 
173253cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
17335312bd8bSAvi Kivity }
17345312bd8bSAvi Kivity 
1735a8170e5eSAvi Kivity MemoryRegion *iotlb_to_region(hwaddr index)
1736aa102231SAvi Kivity {
173753cb28cbSMarcel Apfelbaum     return address_space_memory.dispatch->map.sections[
173853cb28cbSMarcel Apfelbaum            index & ~TARGET_PAGE_MASK].mr;
1739aa102231SAvi Kivity }
1740aa102231SAvi Kivity 
1741e9179ce1SAvi Kivity static void io_mem_init(void)
1742e9179ce1SAvi Kivity {
17432c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
17442c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
17450e0df1e2SAvi Kivity                           "unassigned", UINT64_MAX);
17462c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
17470e0df1e2SAvi Kivity                           "notdirty", UINT64_MAX);
17482c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
17491ec9b909SAvi Kivity                           "watch", UINT64_MAX);
1750e9179ce1SAvi Kivity }
1751e9179ce1SAvi Kivity 
1752ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1753ac1970fbSAvi Kivity {
175489ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
175553cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
175653cb28cbSMarcel Apfelbaum     uint16_t n;
175753cb28cbSMarcel Apfelbaum 
175853cb28cbSMarcel Apfelbaum     n = dummy_section(&d->map, &io_mem_unassigned);
175953cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
176053cb28cbSMarcel Apfelbaum     n = dummy_section(&d->map, &io_mem_notdirty);
176153cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
176253cb28cbSMarcel Apfelbaum     n = dummy_section(&d->map, &io_mem_rom);
176353cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
176453cb28cbSMarcel Apfelbaum     n = dummy_section(&d->map, &io_mem_watch);
176553cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
176600752703SPaolo Bonzini 
17679736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
176800752703SPaolo Bonzini     d->as = as;
176900752703SPaolo Bonzini     as->next_dispatch = d;
177000752703SPaolo Bonzini }
177100752703SPaolo Bonzini 
177200752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
177300752703SPaolo Bonzini {
177400752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
17750475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
17760475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
1777ac1970fbSAvi Kivity 
177853cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
1779b35ba30fSMichael S. Tsirkin 
17800475d94fSPaolo Bonzini     as->dispatch = next;
178153cb28cbSMarcel Apfelbaum 
178253cb28cbSMarcel Apfelbaum     if (cur) {
178353cb28cbSMarcel Apfelbaum         phys_sections_free(&cur->map);
17840475d94fSPaolo Bonzini         g_free(cur);
1785ac1970fbSAvi Kivity     }
17869affd6fcSPaolo Bonzini }
17879affd6fcSPaolo Bonzini 
17881d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
178950c1e149SAvi Kivity {
1790182735efSAndreas Färber     CPUState *cpu;
1791117712c3SAvi Kivity 
1792117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
1793117712c3SAvi Kivity        reset the modified entries */
1794117712c3SAvi Kivity     /* XXX: slow! */
1795bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
1796182735efSAndreas Färber         CPUArchState *env = cpu->env_ptr;
1797182735efSAndreas Färber 
1798117712c3SAvi Kivity         tlb_flush(env, 1);
1799117712c3SAvi Kivity     }
180050c1e149SAvi Kivity }
180150c1e149SAvi Kivity 
180293632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
180393632747SAvi Kivity {
180493632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(1);
180593632747SAvi Kivity }
180693632747SAvi Kivity 
180793632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
180893632747SAvi Kivity {
180993632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(0);
181093632747SAvi Kivity }
181193632747SAvi Kivity 
181293632747SAvi Kivity static MemoryListener core_memory_listener = {
181393632747SAvi Kivity     .log_global_start = core_log_global_start,
181493632747SAvi Kivity     .log_global_stop = core_log_global_stop,
1815ac1970fbSAvi Kivity     .priority = 1,
181693632747SAvi Kivity };
181793632747SAvi Kivity 
18181d71148eSAvi Kivity static MemoryListener tcg_memory_listener = {
18191d71148eSAvi Kivity     .commit = tcg_commit,
18201d71148eSAvi Kivity };
18211d71148eSAvi Kivity 
1822ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
1823ac1970fbSAvi Kivity {
182400752703SPaolo Bonzini     as->dispatch = NULL;
182589ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
1826ac1970fbSAvi Kivity         .begin = mem_begin,
182700752703SPaolo Bonzini         .commit = mem_commit,
1828ac1970fbSAvi Kivity         .region_add = mem_add,
1829ac1970fbSAvi Kivity         .region_nop = mem_add,
1830ac1970fbSAvi Kivity         .priority = 0,
1831ac1970fbSAvi Kivity     };
183289ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
1833ac1970fbSAvi Kivity }
1834ac1970fbSAvi Kivity 
183583f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
183683f3c251SAvi Kivity {
183783f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
183883f3c251SAvi Kivity 
183989ae337aSPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
184083f3c251SAvi Kivity     g_free(d);
184183f3c251SAvi Kivity     as->dispatch = NULL;
184283f3c251SAvi Kivity }
184383f3c251SAvi Kivity 
184462152b8aSAvi Kivity static void memory_map_init(void)
184562152b8aSAvi Kivity {
18467267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
184703f49957SPaolo Bonzini 
184857271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
18497dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
1850309cb471SAvi Kivity 
18517267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
18523bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
18533bb28b72SJan Kiszka                           65536);
18547dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
185593632747SAvi Kivity 
1856f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
18572641689aSliguang     if (tcg_enabled()) {
1858f6790af6SAvi Kivity         memory_listener_register(&tcg_memory_listener, &address_space_memory);
185962152b8aSAvi Kivity     }
18602641689aSliguang }
186162152b8aSAvi Kivity 
186262152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
186362152b8aSAvi Kivity {
186462152b8aSAvi Kivity     return system_memory;
186562152b8aSAvi Kivity }
186662152b8aSAvi Kivity 
1867309cb471SAvi Kivity MemoryRegion *get_system_io(void)
1868309cb471SAvi Kivity {
1869309cb471SAvi Kivity     return system_io;
1870309cb471SAvi Kivity }
1871309cb471SAvi Kivity 
1872e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
1873e2eef170Spbrook 
187413eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
187513eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
1876f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1877a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
187813eb76e0Sbellard {
187913eb76e0Sbellard     int l, flags;
188013eb76e0Sbellard     target_ulong page;
188153a5960aSpbrook     void * p;
188213eb76e0Sbellard 
188313eb76e0Sbellard     while (len > 0) {
188413eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
188513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
188613eb76e0Sbellard         if (l > len)
188713eb76e0Sbellard             l = len;
188813eb76e0Sbellard         flags = page_get_flags(page);
188913eb76e0Sbellard         if (!(flags & PAGE_VALID))
1890a68fe89cSPaul Brook             return -1;
189113eb76e0Sbellard         if (is_write) {
189213eb76e0Sbellard             if (!(flags & PAGE_WRITE))
1893a68fe89cSPaul Brook                 return -1;
1894579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
189572fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1896a68fe89cSPaul Brook                 return -1;
189772fb7daaSaurel32             memcpy(p, buf, l);
189872fb7daaSaurel32             unlock_user(p, addr, l);
189913eb76e0Sbellard         } else {
190013eb76e0Sbellard             if (!(flags & PAGE_READ))
1901a68fe89cSPaul Brook                 return -1;
1902579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
190372fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1904a68fe89cSPaul Brook                 return -1;
190572fb7daaSaurel32             memcpy(buf, p, l);
19065b257578Saurel32             unlock_user(p, addr, 0);
190713eb76e0Sbellard         }
190813eb76e0Sbellard         len -= l;
190913eb76e0Sbellard         buf += l;
191013eb76e0Sbellard         addr += l;
191113eb76e0Sbellard     }
1912a68fe89cSPaul Brook     return 0;
191313eb76e0Sbellard }
19148df1cd07Sbellard 
191513eb76e0Sbellard #else
191651d7a9ebSAnthony PERARD 
1917a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
1918a8170e5eSAvi Kivity                                      hwaddr length)
191951d7a9ebSAnthony PERARD {
192051d7a9ebSAnthony PERARD     if (!cpu_physical_memory_is_dirty(addr)) {
192151d7a9ebSAnthony PERARD         /* invalidate code */
192251d7a9ebSAnthony PERARD         tb_invalidate_phys_page_range(addr, addr + length, 0);
192351d7a9ebSAnthony PERARD         /* set dirty bit */
192452159192SJuan Quintela         cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
192552159192SJuan Quintela         cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
192651d7a9ebSAnthony PERARD     }
1927e226939dSAnthony PERARD     xen_modified_memory(addr, length);
192851d7a9ebSAnthony PERARD }
192951d7a9ebSAnthony PERARD 
19302bbfa05dSPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
19312bbfa05dSPaolo Bonzini {
19322bbfa05dSPaolo Bonzini     if (memory_region_is_ram(mr)) {
19332bbfa05dSPaolo Bonzini         return !(is_write && mr->readonly);
19342bbfa05dSPaolo Bonzini     }
19352bbfa05dSPaolo Bonzini     if (memory_region_is_romd(mr)) {
19362bbfa05dSPaolo Bonzini         return !is_write;
19372bbfa05dSPaolo Bonzini     }
19382bbfa05dSPaolo Bonzini 
19392bbfa05dSPaolo Bonzini     return false;
19402bbfa05dSPaolo Bonzini }
19412bbfa05dSPaolo Bonzini 
194223326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
194382f2563fSPaolo Bonzini {
1944e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
194523326164SRichard Henderson 
194623326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
194723326164SRichard Henderson        otherwise specified.  */
194823326164SRichard Henderson     if (access_size_max == 0) {
194923326164SRichard Henderson         access_size_max = 4;
195082f2563fSPaolo Bonzini     }
195123326164SRichard Henderson 
195223326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
195323326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
195423326164SRichard Henderson         unsigned align_size_max = addr & -addr;
195523326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
195623326164SRichard Henderson             access_size_max = align_size_max;
195723326164SRichard Henderson         }
195823326164SRichard Henderson     }
195923326164SRichard Henderson 
196023326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
196123326164SRichard Henderson     if (l > access_size_max) {
196223326164SRichard Henderson         l = access_size_max;
196323326164SRichard Henderson     }
1964098178f2SPaolo Bonzini     if (l & (l - 1)) {
1965098178f2SPaolo Bonzini         l = 1 << (qemu_fls(l) - 1);
1966098178f2SPaolo Bonzini     }
196723326164SRichard Henderson 
196823326164SRichard Henderson     return l;
196982f2563fSPaolo Bonzini }
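
/*
 * Worked example (illustrative): for a region with no .impl.unaligned and
 * no declared .valid.max_access_size, an 8-byte access at addr 0x1006 is
 * first capped at the default of 4, then at the address alignment
 * (addr & -addr == 2), so l becomes 2 and the caller loops for the rest.
 */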
197082f2563fSPaolo Bonzini 
1971fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1972ac1970fbSAvi Kivity                       int len, bool is_write)
197313eb76e0Sbellard {
1974149f54b5SPaolo Bonzini     hwaddr l;
197513eb76e0Sbellard     uint8_t *ptr;
1976791af8c8SPaolo Bonzini     uint64_t val;
1977149f54b5SPaolo Bonzini     hwaddr addr1;
19785c8a00ceSPaolo Bonzini     MemoryRegion *mr;
1979fd8aaa76SPaolo Bonzini     bool error = false;
198013eb76e0Sbellard 
198113eb76e0Sbellard     while (len > 0) {
198213eb76e0Sbellard         l = len;
19835c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
198413eb76e0Sbellard 
198513eb76e0Sbellard         if (is_write) {
19865c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
19875c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
19884917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
19896a00d601Sbellard                    potential bugs */
199023326164SRichard Henderson                 switch (l) {
199123326164SRichard Henderson                 case 8:
199223326164SRichard Henderson                     /* 64 bit write access */
199323326164SRichard Henderson                     val = ldq_p(buf);
199423326164SRichard Henderson                     error |= io_mem_write(mr, addr1, val, 8);
199523326164SRichard Henderson                     break;
199623326164SRichard Henderson                 case 4:
19971c213d19Sbellard                     /* 32 bit write access */
1998c27004ecSbellard                     val = ldl_p(buf);
19995c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
200023326164SRichard Henderson                     break;
200123326164SRichard Henderson                 case 2:
20021c213d19Sbellard                     /* 16 bit write access */
2003c27004ecSbellard                     val = lduw_p(buf);
20045c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
200523326164SRichard Henderson                     break;
200623326164SRichard Henderson                 case 1:
20071c213d19Sbellard                     /* 8 bit write access */
2008c27004ecSbellard                     val = ldub_p(buf);
20095c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
201023326164SRichard Henderson                     break;
201123326164SRichard Henderson                 default:
201223326164SRichard Henderson                     abort();
201313eb76e0Sbellard                 }
20142bbfa05dSPaolo Bonzini             } else {
20155c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
201613eb76e0Sbellard                 /* RAM case */
20175579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
201813eb76e0Sbellard                 memcpy(ptr, buf, l);
201951d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
20203a7d929eSbellard             }
202113eb76e0Sbellard         } else {
20225c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
202313eb76e0Sbellard                 /* I/O case */
20245c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
202523326164SRichard Henderson                 switch (l) {
202623326164SRichard Henderson                 case 8:
202723326164SRichard Henderson                     /* 64 bit read access */
202823326164SRichard Henderson                     error |= io_mem_read(mr, addr1, &val, 8);
202923326164SRichard Henderson                     stq_p(buf, val);
203023326164SRichard Henderson                     break;
203123326164SRichard Henderson                 case 4:
203213eb76e0Sbellard                     /* 32 bit read access */
20335c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
2034c27004ecSbellard                     stl_p(buf, val);
203523326164SRichard Henderson                     break;
203623326164SRichard Henderson                 case 2:
203713eb76e0Sbellard                     /* 16 bit read access */
20385c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
2039c27004ecSbellard                     stw_p(buf, val);
204023326164SRichard Henderson                     break;
204123326164SRichard Henderson                 case 1:
20421c213d19Sbellard                     /* 8 bit read access */
20435c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
2044c27004ecSbellard                     stb_p(buf, val);
204523326164SRichard Henderson                     break;
204623326164SRichard Henderson                 default:
204723326164SRichard Henderson                     abort();
204813eb76e0Sbellard                 }
204913eb76e0Sbellard             } else {
205013eb76e0Sbellard                 /* RAM case */
20515c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2052f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
205313eb76e0Sbellard             }
205413eb76e0Sbellard         }
205513eb76e0Sbellard         len -= l;
205613eb76e0Sbellard         buf += l;
205713eb76e0Sbellard         addr += l;
205813eb76e0Sbellard     }
2059fd8aaa76SPaolo Bonzini 
2060fd8aaa76SPaolo Bonzini     return error;
206113eb76e0Sbellard }
20628df1cd07Sbellard 
2063fd8aaa76SPaolo Bonzini bool address_space_write(AddressSpace *as, hwaddr addr,
2064ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
2065ac1970fbSAvi Kivity {
2066fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2067ac1970fbSAvi Kivity }
2068ac1970fbSAvi Kivity 
2069fd8aaa76SPaolo Bonzini bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2070ac1970fbSAvi Kivity {
2071fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, buf, len, false);
2072ac1970fbSAvi Kivity }
2073ac1970fbSAvi Kivity 
2075a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2076ac1970fbSAvi Kivity                             int len, int is_write)
2077ac1970fbSAvi Kivity {
2078fd8aaa76SPaolo Bonzini     address_space_rw(&address_space_memory, addr, buf, len, is_write);
2079ac1970fbSAvi Kivity }
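
/*
 * Usage sketch (illustrative, not part of the build; address arbitrary):
 * reading four bytes of guest-physical memory into a host variable.
 */
#if 0
static void example_read_guest(void)
{
    uint32_t val;

    cpu_physical_memory_rw(0x1000, (uint8_t *)&val, sizeof(val), 0);
    /* or, naming the address space explicitly: */
    address_space_read(&address_space_memory, 0x1000, (uint8_t *)&val,
                       sizeof(val));
}
#endif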
2080ac1970fbSAvi Kivity 
2081582b55a9SAlexander Graf enum write_rom_type {
2082582b55a9SAlexander Graf     WRITE_DATA,
2083582b55a9SAlexander Graf     FLUSH_CACHE,
2084582b55a9SAlexander Graf };
2085582b55a9SAlexander Graf 
2086582b55a9SAlexander Graf static inline void cpu_physical_memory_write_rom_internal(
2087582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2088d0ecd2aaSbellard {
2089149f54b5SPaolo Bonzini     hwaddr l;
2090d0ecd2aaSbellard     uint8_t *ptr;
2091149f54b5SPaolo Bonzini     hwaddr addr1;
20925c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2093d0ecd2aaSbellard 
2094d0ecd2aaSbellard     while (len > 0) {
2095d0ecd2aaSbellard         l = len;
20965c8a00ceSPaolo Bonzini         mr = address_space_translate(&address_space_memory,
2097149f54b5SPaolo Bonzini                                      addr, &addr1, &l, true);
2098d0ecd2aaSbellard 
20995c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
21005c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2101d0ecd2aaSbellard             /* do nothing */
2102d0ecd2aaSbellard         } else {
21035c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2104d0ecd2aaSbellard             /* ROM/RAM case */
21055579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2106582b55a9SAlexander Graf             switch (type) {
2107582b55a9SAlexander Graf             case WRITE_DATA:
2108d0ecd2aaSbellard                 memcpy(ptr, buf, l);
210951d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
2110582b55a9SAlexander Graf                 break;
2111582b55a9SAlexander Graf             case FLUSH_CACHE:
2112582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2113582b55a9SAlexander Graf                 break;
2114582b55a9SAlexander Graf             }
2115d0ecd2aaSbellard         }
2116d0ecd2aaSbellard         len -= l;
2117d0ecd2aaSbellard         buf += l;
2118d0ecd2aaSbellard         addr += l;
2119d0ecd2aaSbellard     }
2120d0ecd2aaSbellard }
2121d0ecd2aaSbellard 
2122582b55a9SAlexander Graf /* used for ROM loading: can write to both RAM and ROM */
2123582b55a9SAlexander Graf void cpu_physical_memory_write_rom(hwaddr addr,
2124582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2125582b55a9SAlexander Graf {
2126582b55a9SAlexander Graf     cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2127582b55a9SAlexander Graf }
2128582b55a9SAlexander Graf 
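/*
 * Editor's sketch: a firmware loader would use the ROM-capable writer
 * above, since ordinary writes are not applied to read-only regions.
 * 'blob' and 'blob_size' are hypothetical.
 */
static void example_load_firmware(hwaddr rom_base,
                                  const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
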
2129582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2130582b55a9SAlexander Graf {
2131582b55a9SAlexander Graf     /*
2132582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2133582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2134582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2135582b55a9SAlexander Graf      * at least the host's instruction cache.
2136582b55a9SAlexander Graf      */
2137582b55a9SAlexander Graf     if (tcg_enabled()) {
2138582b55a9SAlexander Graf         return;
2139582b55a9SAlexander Graf     }
2140582b55a9SAlexander Graf 
2141582b55a9SAlexander Graf     cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2142582b55a9SAlexander Graf }
2143582b55a9SAlexander Graf 
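/*
 * Editor's sketch: after copying executable code into guest RAM, a caller
 * running under KVM or Xen would make the host instruction cache coherent
 * before letting the guest jump to it; under TCG the flush is a no-op, as
 * the comment above explains. Names are hypothetical.
 */
static void example_install_code(hwaddr code_gpa,
                                 const uint8_t *code, int size)
{
    cpu_physical_memory_write_rom(code_gpa, code, size);
    cpu_flush_icache_range(code_gpa, size);
}
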
21446d16c2f8Saliguori typedef struct {
2145d3e71559SPaolo Bonzini     MemoryRegion *mr;
21466d16c2f8Saliguori     void *buffer;
2147a8170e5eSAvi Kivity     hwaddr addr;
2148a8170e5eSAvi Kivity     hwaddr len;
21496d16c2f8Saliguori } BounceBuffer;
21506d16c2f8Saliguori 
21516d16c2f8Saliguori static BounceBuffer bounce;
21526d16c2f8Saliguori 
2153ba223c29Saliguori typedef struct MapClient {
2154ba223c29Saliguori     void *opaque;
2155ba223c29Saliguori     void (*callback)(void *opaque);
215672cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2157ba223c29Saliguori } MapClient;
2158ba223c29Saliguori 
215972cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
216072cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2161ba223c29Saliguori 
2162ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2163ba223c29Saliguori {
21647267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
2165ba223c29Saliguori 
2166ba223c29Saliguori     client->opaque = opaque;
2167ba223c29Saliguori     client->callback = callback;
216872cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
2169ba223c29Saliguori     return client;
2170ba223c29Saliguori }
2171ba223c29Saliguori 
21728b9c99d9SBlue Swirl static void cpu_unregister_map_client(void *_client)
2173ba223c29Saliguori {
2174ba223c29Saliguori     MapClient *client = (MapClient *)_client;
2175ba223c29Saliguori 
217672cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
21777267c094SAnthony Liguori     g_free(client);
2178ba223c29Saliguori }
2179ba223c29Saliguori 
2180ba223c29Saliguori static void cpu_notify_map_clients(void)
2181ba223c29Saliguori {
2182ba223c29Saliguori     MapClient *client;
2183ba223c29Saliguori 
218472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
218572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2186ba223c29Saliguori         client->callback(client->opaque);
218734d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
2188ba223c29Saliguori     }
2189ba223c29Saliguori }
2190ba223c29Saliguori 
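/*
 * Editor's sketch: when address_space_map() returns NULL because the
 * single bounce buffer is busy, a caller can register a callback to be
 * notified once it is released, then retry the mapping. 'ExampleDev'
 * and both example_* functions are hypothetical.
 */
typedef struct ExampleDev ExampleDev;

static void example_retry_dma(void *opaque)
{
    /* The bounce buffer was just freed; retry the deferred map here.
     * ExampleDev *dev = opaque; ... */
}

static void example_defer_dma(ExampleDev *dev)
{
    cpu_register_map_client(dev, example_retry_dma);
}
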
219151644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
219251644ab7SPaolo Bonzini {
21935c8a00ceSPaolo Bonzini     MemoryRegion *mr;
219451644ab7SPaolo Bonzini     hwaddr l, xlat;
219551644ab7SPaolo Bonzini 
219651644ab7SPaolo Bonzini     while (len > 0) {
219751644ab7SPaolo Bonzini         l = len;
21985c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
21995c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
22005c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
22015c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
220251644ab7SPaolo Bonzini                 return false;
220351644ab7SPaolo Bonzini             }
220451644ab7SPaolo Bonzini         }
220551644ab7SPaolo Bonzini 
220651644ab7SPaolo Bonzini         len -= l;
220751644ab7SPaolo Bonzini         addr += l;
220851644ab7SPaolo Bonzini     }
220951644ab7SPaolo Bonzini     return true;
221051644ab7SPaolo Bonzini }
221151644ab7SPaolo Bonzini 
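/*
 * Editor's sketch: a device can probe whether an entire DMA window is
 * accessible before committing to the transfer, instead of discovering
 * a fault halfway through. Names are hypothetical.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr gpa, int len)
{
    return address_space_access_valid(as, gpa, len, true /* is_write */);
}
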
22126d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
22136d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
22146d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
22156d16c2f8Saliguori  * Use only for reads OR writes, not for read-modify-write operations.
2216ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2217ba223c29Saliguori  * likely to succeed.
22186d16c2f8Saliguori  */
2219ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2220a8170e5eSAvi Kivity                         hwaddr addr,
2221a8170e5eSAvi Kivity                         hwaddr *plen,
2222ac1970fbSAvi Kivity                         bool is_write)
22236d16c2f8Saliguori {
2224a8170e5eSAvi Kivity     hwaddr len = *plen;
2225e3127ae0SPaolo Bonzini     hwaddr done = 0;
2226e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2227e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2228e3127ae0SPaolo Bonzini     ram_addr_t raddr;
22296d16c2f8Saliguori 
2230e3127ae0SPaolo Bonzini     if (len == 0) {
2231e3127ae0SPaolo Bonzini         return NULL;
2232e3127ae0SPaolo Bonzini     }
2233e3127ae0SPaolo Bonzini 
22346d16c2f8Saliguori     l = len;
22355c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
22365c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2237e3127ae0SPaolo Bonzini         if (bounce.buffer) {
2238e3127ae0SPaolo Bonzini             return NULL;
22396d16c2f8Saliguori         }
2240e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2241e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2242e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
22436d16c2f8Saliguori         bounce.addr = addr;
22446d16c2f8Saliguori         bounce.len = l;
2245d3e71559SPaolo Bonzini 
2246d3e71559SPaolo Bonzini         memory_region_ref(mr);
2247d3e71559SPaolo Bonzini         bounce.mr = mr;
22486d16c2f8Saliguori         if (!is_write) {
2249ac1970fbSAvi Kivity             address_space_read(as, addr, bounce.buffer, l);
22506d16c2f8Saliguori         }
225138bee5dcSStefano Stabellini 
225238bee5dcSStefano Stabellini         *plen = l;
225338bee5dcSStefano Stabellini         return bounce.buffer;
22546d16c2f8Saliguori     }
2255e3127ae0SPaolo Bonzini 
2256e3127ae0SPaolo Bonzini     base = xlat;
2257e3127ae0SPaolo Bonzini     raddr = memory_region_get_ram_addr(mr);
2258e3127ae0SPaolo Bonzini 
2259e3127ae0SPaolo Bonzini     for (;;) {
2260e3127ae0SPaolo Bonzini         len -= l;
2261e3127ae0SPaolo Bonzini         addr += l;
2262e3127ae0SPaolo Bonzini         done += l;
2263e3127ae0SPaolo Bonzini         if (len == 0) {
2264e3127ae0SPaolo Bonzini             break;
2265e3127ae0SPaolo Bonzini         }
2266e3127ae0SPaolo Bonzini 
2267e3127ae0SPaolo Bonzini         l = len;
2268e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2269e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2270149f54b5SPaolo Bonzini             break;
2271149f54b5SPaolo Bonzini         }
22728ab934f9SStefano Stabellini     }
22736d16c2f8Saliguori 
2274d3e71559SPaolo Bonzini     memory_region_ref(mr);
2275e3127ae0SPaolo Bonzini     *plen = done;
2276e3127ae0SPaolo Bonzini     return qemu_ram_ptr_length(raddr + base, plen);
22776d16c2f8Saliguori }
22786d16c2f8Saliguori 
2279ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
22806d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
22816d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
22826d16c2f8Saliguori  */
2283a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2284a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
22856d16c2f8Saliguori {
22866d16c2f8Saliguori     if (buffer != bounce.buffer) {
2287d3e71559SPaolo Bonzini         MemoryRegion *mr;
22887443b437SPaolo Bonzini         ram_addr_t addr1;
2289d3e71559SPaolo Bonzini 
2290d3e71559SPaolo Bonzini         mr = qemu_ram_addr_from_host(buffer, &addr1);
22911b5ec234SPaolo Bonzini         assert(mr != NULL);
2292d3e71559SPaolo Bonzini         if (is_write) {
22936d16c2f8Saliguori             while (access_len) {
22946d16c2f8Saliguori                 unsigned l;
22956d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
22966d16c2f8Saliguori                 if (l > access_len)
22976d16c2f8Saliguori                     l = access_len;
229851d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
22996d16c2f8Saliguori                 addr1 += l;
23006d16c2f8Saliguori                 access_len -= l;
23016d16c2f8Saliguori             }
23026d16c2f8Saliguori         }
2303868bb33fSJan Kiszka         if (xen_enabled()) {
2304e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2305050a0ddfSAnthony PERARD         }
2306d3e71559SPaolo Bonzini         memory_region_unref(mr);
23076d16c2f8Saliguori         return;
23086d16c2f8Saliguori     }
23096d16c2f8Saliguori     if (is_write) {
2310ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
23116d16c2f8Saliguori     }
2312f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
23136d16c2f8Saliguori     bounce.buffer = NULL;
2314d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2315ba223c29Saliguori     cpu_notify_map_clients();
23166d16c2f8Saliguori }
2317d0ecd2aaSbellard 
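/*
 * Editor's sketch: the canonical zero-copy DMA pattern built from the
 * two functions above. *plen may come back smaller than requested, so
 * real callers loop; this minimal version only transfers the prefix
 * that was actually mapped. Names are hypothetical.
 */
static void example_dma_write(AddressSpace *as, hwaddr gpa,
                              const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, gpa, &plen, true /* is_write */);

    if (!host) {
        return; /* bounce buffer busy; see cpu_register_map_client() */
    }
    memcpy(host, data, plen);
    address_space_unmap(as, host, plen, true /* is_write */, plen);
}
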
2318a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2319a8170e5eSAvi Kivity                               hwaddr *plen,
2320ac1970fbSAvi Kivity                               int is_write)
2321ac1970fbSAvi Kivity {
2322ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2323ac1970fbSAvi Kivity }
2324ac1970fbSAvi Kivity 
2325a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2326a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2327ac1970fbSAvi Kivity {
2328ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2329ac1970fbSAvi Kivity }
2330ac1970fbSAvi Kivity 
23318df1cd07Sbellard /* warning: addr must be aligned */
2332a8170e5eSAvi Kivity static inline uint32_t ldl_phys_internal(hwaddr addr,
23331e78bcc1SAlexander Graf                                          enum device_endian endian)
23348df1cd07Sbellard {
23358df1cd07Sbellard     uint8_t *ptr;
2336791af8c8SPaolo Bonzini     uint64_t val;
23375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2338149f54b5SPaolo Bonzini     hwaddr l = 4;
2339149f54b5SPaolo Bonzini     hwaddr addr1;
23408df1cd07Sbellard 
23415c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2342149f54b5SPaolo Bonzini                                  false);
23435c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
23448df1cd07Sbellard         /* I/O case */
23455c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 4);
23461e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
23471e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
23481e78bcc1SAlexander Graf             val = bswap32(val);
23491e78bcc1SAlexander Graf         }
23501e78bcc1SAlexander Graf #else
23511e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
23521e78bcc1SAlexander Graf             val = bswap32(val);
23531e78bcc1SAlexander Graf         }
23541e78bcc1SAlexander Graf #endif
23558df1cd07Sbellard     } else {
23568df1cd07Sbellard         /* RAM case */
23575c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
235806ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2359149f54b5SPaolo Bonzini                                + addr1);
23601e78bcc1SAlexander Graf         switch (endian) {
23611e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
23621e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
23631e78bcc1SAlexander Graf             break;
23641e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
23651e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
23661e78bcc1SAlexander Graf             break;
23671e78bcc1SAlexander Graf         default:
23688df1cd07Sbellard             val = ldl_p(ptr);
23691e78bcc1SAlexander Graf             break;
23701e78bcc1SAlexander Graf         }
23718df1cd07Sbellard     }
23728df1cd07Sbellard     return val;
23738df1cd07Sbellard }
23748df1cd07Sbellard 
2375a8170e5eSAvi Kivity uint32_t ldl_phys(hwaddr addr)
23761e78bcc1SAlexander Graf {
23771e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
23781e78bcc1SAlexander Graf }
23791e78bcc1SAlexander Graf 
2380a8170e5eSAvi Kivity uint32_t ldl_le_phys(hwaddr addr)
23811e78bcc1SAlexander Graf {
23821e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
23831e78bcc1SAlexander Graf }
23841e78bcc1SAlexander Graf 
2385a8170e5eSAvi Kivity uint32_t ldl_be_phys(hwaddr addr)
23861e78bcc1SAlexander Graf {
23871e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
23881e78bcc1SAlexander Graf }
23891e78bcc1SAlexander Graf 
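/*
 * Editor's sketch: the _le/_be variants let a caller state the device's
 * endianness once instead of byte-swapping conditionally on
 * TARGET_WORDS_BIGENDIAN. Reading a little-endian 32-bit register at a
 * hypothetical guest-physical address:
 */
static uint32_t example_read_le_reg(hwaddr reg_gpa)
{
    /* always interprets memory as little-endian, whatever the target is */
    return ldl_le_phys(reg_gpa);
}
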
239084b7b8e7Sbellard /* warning: addr must be aligned */
2391a8170e5eSAvi Kivity static inline uint64_t ldq_phys_internal(hwaddr addr,
23921e78bcc1SAlexander Graf                                          enum device_endian endian)
239384b7b8e7Sbellard {
239484b7b8e7Sbellard     uint8_t *ptr;
239584b7b8e7Sbellard     uint64_t val;
23965c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2397149f54b5SPaolo Bonzini     hwaddr l = 8;
2398149f54b5SPaolo Bonzini     hwaddr addr1;
239984b7b8e7Sbellard 
24005c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2401149f54b5SPaolo Bonzini                                  false);
24025c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
240384b7b8e7Sbellard         /* I/O case */
24045c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 8);
2405968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
2406968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
2407968a5627SPaolo Bonzini             val = bswap64(val);
2408968a5627SPaolo Bonzini         }
2409968a5627SPaolo Bonzini #else
2410968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
2411968a5627SPaolo Bonzini             val = bswap64(val);
2412968a5627SPaolo Bonzini         }
2413968a5627SPaolo Bonzini #endif
241484b7b8e7Sbellard     } else {
241584b7b8e7Sbellard         /* RAM case */
24165c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
241706ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2418149f54b5SPaolo Bonzini                                + addr1);
24191e78bcc1SAlexander Graf         switch (endian) {
24201e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24211e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
24221e78bcc1SAlexander Graf             break;
24231e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24241e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
24251e78bcc1SAlexander Graf             break;
24261e78bcc1SAlexander Graf         default:
242784b7b8e7Sbellard             val = ldq_p(ptr);
24281e78bcc1SAlexander Graf             break;
24291e78bcc1SAlexander Graf         }
243084b7b8e7Sbellard     }
243184b7b8e7Sbellard     return val;
243284b7b8e7Sbellard }
243384b7b8e7Sbellard 
2434a8170e5eSAvi Kivity uint64_t ldq_phys(hwaddr addr)
24351e78bcc1SAlexander Graf {
24361e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
24371e78bcc1SAlexander Graf }
24381e78bcc1SAlexander Graf 
2439a8170e5eSAvi Kivity uint64_t ldq_le_phys(hwaddr addr)
24401e78bcc1SAlexander Graf {
24411e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
24421e78bcc1SAlexander Graf }
24431e78bcc1SAlexander Graf 
2444a8170e5eSAvi Kivity uint64_t ldq_be_phys(hwaddr addr)
24451e78bcc1SAlexander Graf {
24461e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
24471e78bcc1SAlexander Graf }
24481e78bcc1SAlexander Graf 
2449aab33094Sbellard /* XXX: optimize */
2450a8170e5eSAvi Kivity uint32_t ldub_phys(hwaddr addr)
2451aab33094Sbellard {
2452aab33094Sbellard     uint8_t val;
2453aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2454aab33094Sbellard     return val;
2455aab33094Sbellard }
2456aab33094Sbellard 
2457733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2458a8170e5eSAvi Kivity static inline uint32_t lduw_phys_internal(hwaddr addr,
24591e78bcc1SAlexander Graf                                           enum device_endian endian)
2460aab33094Sbellard {
2461733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2462733f0b02SMichael S. Tsirkin     uint64_t val;
24635c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2464149f54b5SPaolo Bonzini     hwaddr l = 2;
2465149f54b5SPaolo Bonzini     hwaddr addr1;
2466733f0b02SMichael S. Tsirkin 
24675c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2468149f54b5SPaolo Bonzini                                  false);
24695c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2470733f0b02SMichael S. Tsirkin         /* I/O case */
24715c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
24721e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
24731e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
24741e78bcc1SAlexander Graf             val = bswap16(val);
24751e78bcc1SAlexander Graf         }
24761e78bcc1SAlexander Graf #else
24771e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
24781e78bcc1SAlexander Graf             val = bswap16(val);
24791e78bcc1SAlexander Graf         }
24801e78bcc1SAlexander Graf #endif
2481733f0b02SMichael S. Tsirkin     } else {
2482733f0b02SMichael S. Tsirkin         /* RAM case */
24835c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
248406ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2485149f54b5SPaolo Bonzini                                + addr1);
24861e78bcc1SAlexander Graf         switch (endian) {
24871e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24881e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
24891e78bcc1SAlexander Graf             break;
24901e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24911e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
24921e78bcc1SAlexander Graf             break;
24931e78bcc1SAlexander Graf         default:
2494733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
24951e78bcc1SAlexander Graf             break;
24961e78bcc1SAlexander Graf         }
2497733f0b02SMichael S. Tsirkin     }
2498733f0b02SMichael S. Tsirkin     return val;
2499aab33094Sbellard }
2500aab33094Sbellard 
2501a8170e5eSAvi Kivity uint32_t lduw_phys(hwaddr addr)
25021e78bcc1SAlexander Graf {
25031e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
25041e78bcc1SAlexander Graf }
25051e78bcc1SAlexander Graf 
2506a8170e5eSAvi Kivity uint32_t lduw_le_phys(hwaddr addr)
25071e78bcc1SAlexander Graf {
25081e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
25091e78bcc1SAlexander Graf }
25101e78bcc1SAlexander Graf 
2511a8170e5eSAvi Kivity uint32_t lduw_be_phys(hwaddr addr)
25121e78bcc1SAlexander Graf {
25131e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
25141e78bcc1SAlexander Graf }
25151e78bcc1SAlexander Graf 
25168df1cd07Sbellard /* warning: addr must be aligned. The RAM page is not marked as dirty
25178df1cd07Sbellard    and the code inside is not invalidated. This is useful when the dirty
25188df1cd07Sbellard    bits are used to track modified PTEs */
2519a8170e5eSAvi Kivity void stl_phys_notdirty(hwaddr addr, uint32_t val)
25208df1cd07Sbellard {
25218df1cd07Sbellard     uint8_t *ptr;
25225c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2523149f54b5SPaolo Bonzini     hwaddr l = 4;
2524149f54b5SPaolo Bonzini     hwaddr addr1;
25258df1cd07Sbellard 
25265c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2527149f54b5SPaolo Bonzini                                  true);
25285c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
25295c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
25308df1cd07Sbellard     } else {
25315c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
25325579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
25338df1cd07Sbellard         stl_p(ptr, val);
253474576198Saliguori 
253574576198Saliguori         if (unlikely(in_migration)) {
253674576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
253774576198Saliguori                 /* invalidate code */
253874576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
253974576198Saliguori                 /* set dirty bit */
254052159192SJuan Quintela                 cpu_physical_memory_set_dirty_flag(addr1,
254152159192SJuan Quintela                                                    DIRTY_MEMORY_MIGRATION);
254252159192SJuan Quintela                 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
254374576198Saliguori             }
254474576198Saliguori         }
25458df1cd07Sbellard     }
25468df1cd07Sbellard }
25478df1cd07Sbellard 
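/*
 * Editor's sketch: the classic user of stl_phys_notdirty() is a software
 * page-table walker updating accessed/dirty flags in a guest PTE; skipping
 * the dirty-bit update keeps QEMU's own write from being mistaken for a
 * guest modification of the PTE. EXAMPLE_PTE_ACCESSED is a hypothetical
 * constant, not a real architecture bit.
 */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_mark_pte_accessed(hwaddr pte_gpa, uint32_t pte)
{
    stl_phys_notdirty(pte_gpa, pte | EXAMPLE_PTE_ACCESSED);
}
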
25488df1cd07Sbellard /* warning: addr must be aligned */
2549a8170e5eSAvi Kivity static inline void stl_phys_internal(hwaddr addr, uint32_t val,
25501e78bcc1SAlexander Graf                                      enum device_endian endian)
25518df1cd07Sbellard {
25528df1cd07Sbellard     uint8_t *ptr;
25535c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2554149f54b5SPaolo Bonzini     hwaddr l = 4;
2555149f54b5SPaolo Bonzini     hwaddr addr1;
25568df1cd07Sbellard 
25575c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2558149f54b5SPaolo Bonzini                                  true);
25595c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
25601e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
25611e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
25621e78bcc1SAlexander Graf             val = bswap32(val);
25631e78bcc1SAlexander Graf         }
25641e78bcc1SAlexander Graf #else
25651e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
25661e78bcc1SAlexander Graf             val = bswap32(val);
25671e78bcc1SAlexander Graf         }
25681e78bcc1SAlexander Graf #endif
25695c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
25708df1cd07Sbellard     } else {
25718df1cd07Sbellard         /* RAM case */
25725c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
25735579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
25741e78bcc1SAlexander Graf         switch (endian) {
25751e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25761e78bcc1SAlexander Graf             stl_le_p(ptr, val);
25771e78bcc1SAlexander Graf             break;
25781e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25791e78bcc1SAlexander Graf             stl_be_p(ptr, val);
25801e78bcc1SAlexander Graf             break;
25811e78bcc1SAlexander Graf         default:
25828df1cd07Sbellard             stl_p(ptr, val);
25831e78bcc1SAlexander Graf             break;
25841e78bcc1SAlexander Graf         }
258551d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
25868df1cd07Sbellard     }
25873a7d929eSbellard }
25888df1cd07Sbellard 
2589a8170e5eSAvi Kivity void stl_phys(hwaddr addr, uint32_t val)
25901e78bcc1SAlexander Graf {
25911e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
25921e78bcc1SAlexander Graf }
25931e78bcc1SAlexander Graf 
2594a8170e5eSAvi Kivity void stl_le_phys(hwaddr addr, uint32_t val)
25951e78bcc1SAlexander Graf {
25961e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
25971e78bcc1SAlexander Graf }
25981e78bcc1SAlexander Graf 
2599a8170e5eSAvi Kivity void stl_be_phys(hwaddr addr, uint32_t val)
26001e78bcc1SAlexander Graf {
26011e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
26021e78bcc1SAlexander Graf }
26031e78bcc1SAlexander Graf 
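/*
 * Editor's sketch: storing a guest-visible, big-endian control word.
 * Unlike stl_phys_notdirty() above, these store variants do invalidate
 * translated code and set the dirty bits for the page. Names are
 * hypothetical.
 */
static void example_write_be_reg(hwaddr reg_gpa, uint32_t val)
{
    stl_be_phys(reg_gpa, val);
}
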
2604aab33094Sbellard /* XXX: optimize */
2605a8170e5eSAvi Kivity void stb_phys(hwaddr addr, uint32_t val)
2606aab33094Sbellard {
2607aab33094Sbellard     uint8_t v = val;
2608aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2609aab33094Sbellard }
2610aab33094Sbellard 
2611733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2612a8170e5eSAvi Kivity static inline void stw_phys_internal(hwaddr addr, uint32_t val,
26131e78bcc1SAlexander Graf                                      enum device_endian endian)
2614aab33094Sbellard {
2615733f0b02SMichael S. Tsirkin     uint8_t *ptr;
26165c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2617149f54b5SPaolo Bonzini     hwaddr l = 2;
2618149f54b5SPaolo Bonzini     hwaddr addr1;
2619733f0b02SMichael S. Tsirkin 
26205c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2621149f54b5SPaolo Bonzini                                  true);
26225c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
26231e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26241e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26251e78bcc1SAlexander Graf             val = bswap16(val);
26261e78bcc1SAlexander Graf         }
26271e78bcc1SAlexander Graf #else
26281e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26291e78bcc1SAlexander Graf             val = bswap16(val);
26301e78bcc1SAlexander Graf         }
26311e78bcc1SAlexander Graf #endif
26325c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2633733f0b02SMichael S. Tsirkin     } else {
2634733f0b02SMichael S. Tsirkin         /* RAM case */
26355c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2636733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
26371e78bcc1SAlexander Graf         switch (endian) {
26381e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26391e78bcc1SAlexander Graf             stw_le_p(ptr, val);
26401e78bcc1SAlexander Graf             break;
26411e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26421e78bcc1SAlexander Graf             stw_be_p(ptr, val);
26431e78bcc1SAlexander Graf             break;
26441e78bcc1SAlexander Graf         default:
2645733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
26461e78bcc1SAlexander Graf             break;
26471e78bcc1SAlexander Graf         }
264851d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2649733f0b02SMichael S. Tsirkin     }
2650aab33094Sbellard }
2651aab33094Sbellard 
2652a8170e5eSAvi Kivity void stw_phys(hwaddr addr, uint32_t val)
26531e78bcc1SAlexander Graf {
26541e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
26551e78bcc1SAlexander Graf }
26561e78bcc1SAlexander Graf 
2657a8170e5eSAvi Kivity void stw_le_phys(hwaddr addr, uint32_t val)
26581e78bcc1SAlexander Graf {
26591e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
26601e78bcc1SAlexander Graf }
26611e78bcc1SAlexander Graf 
2662a8170e5eSAvi Kivity void stw_be_phys(hwaddr addr, uint32_t val)
26631e78bcc1SAlexander Graf {
26641e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
26651e78bcc1SAlexander Graf }
26661e78bcc1SAlexander Graf 
2667aab33094Sbellard /* XXX: optimize */
2668a8170e5eSAvi Kivity void stq_phys(hwaddr addr, uint64_t val)
2669aab33094Sbellard {
2670aab33094Sbellard     val = tswap64(val);
267171d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
2672aab33094Sbellard }
2673aab33094Sbellard 
2674a8170e5eSAvi Kivity void stq_le_phys(hwaddr addr, uint64_t val)
26751e78bcc1SAlexander Graf {
26761e78bcc1SAlexander Graf     val = cpu_to_le64(val);
26771e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
26781e78bcc1SAlexander Graf }
26791e78bcc1SAlexander Graf 
2680a8170e5eSAvi Kivity void stq_be_phys(hwaddr addr, uint64_t val)
26811e78bcc1SAlexander Graf {
26821e78bcc1SAlexander Graf     val = cpu_to_be64(val);
26831e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
26841e78bcc1SAlexander Graf }
26851e78bcc1SAlexander Graf 
26865e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
2687f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2688b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
268913eb76e0Sbellard {
269013eb76e0Sbellard     int l;
2691a8170e5eSAvi Kivity     hwaddr phys_addr;
26929b3c35e0Sj_mayer     target_ulong page;
269313eb76e0Sbellard 
269413eb76e0Sbellard     while (len > 0) {
269513eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
2696f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
269713eb76e0Sbellard         /* if no physical page mapped, return an error */
269813eb76e0Sbellard         if (phys_addr == -1)
269913eb76e0Sbellard             return -1;
270013eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
270113eb76e0Sbellard         if (l > len)
270213eb76e0Sbellard             l = len;
27035e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
27045e2972fdSaliguori         if (is_write)
27055e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
27065e2972fdSaliguori         else
27075e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
270813eb76e0Sbellard         len -= l;
270913eb76e0Sbellard         buf += l;
271013eb76e0Sbellard         addr += l;
271113eb76e0Sbellard     }
271213eb76e0Sbellard     return 0;
271313eb76e0Sbellard }
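
/*
 * Editor's sketch: cpu_memory_rw_debug() is the primitive behind the
 * gdbstub's memory commands; it walks the guest's virtual mappings page
 * by page, as the loop above shows. A hypothetical debugger read:
 */
static int example_debug_peek(CPUState *cpu, target_ulong va,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, va, buf, len, 0 /* is_write */);
}
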
2714a68fe89cSPaul Brook #endif
271513eb76e0Sbellard 
27168e4a424bSBlue Swirl #if !defined(CONFIG_USER_ONLY)
27178e4a424bSBlue Swirl 
27188e4a424bSBlue Swirl /*
27198e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find
27208e4a424bSBlue Swirl  * out if it's running on a big-endian machine. Don't do this at home, kids!
27218e4a424bSBlue Swirl  */
27228e4a424bSBlue Swirl bool virtio_is_big_endian(void);
27238e4a424bSBlue Swirl bool virtio_is_big_endian(void)
27248e4a424bSBlue Swirl {
27258e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
27268e4a424bSBlue Swirl     return true;
27278e4a424bSBlue Swirl #else
27288e4a424bSBlue Swirl     return false;
27298e4a424bSBlue Swirl #endif
27308e4a424bSBlue Swirl }
27318e4a424bSBlue Swirl 
27328e4a424bSBlue Swirl #endif
27338e4a424bSBlue Swirl 
273476f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2735a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
273676f35538SWen Congyang {
27375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2738149f54b5SPaolo Bonzini     hwaddr l = 1;
273976f35538SWen Congyang 
27405c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2741149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
274276f35538SWen Congyang 
27435c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
27445c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
274576f35538SWen Congyang }
2746bd2fa51fSMichael R. Hines 
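/*
 * Editor's sketch: a memory dumper can use the predicate above to skip
 * MMIO-backed addresses, where reading could have side effects. The name
 * is hypothetical.
 */
static bool example_should_dump(hwaddr gpa)
{
    return !cpu_physical_memory_is_io(gpa);
}
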
2747bd2fa51fSMichael R. Hines void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2748bd2fa51fSMichael R. Hines {
2749bd2fa51fSMichael R. Hines     RAMBlock *block;
2750bd2fa51fSMichael R. Hines 
2751bd2fa51fSMichael R. Hines     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2752bd2fa51fSMichael R. Hines         func(block->host, block->offset, block->length, opaque);
2753bd2fa51fSMichael R. Hines     }
2754bd2fa51fSMichael R. Hines }
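
/*
 * Editor's sketch: an iterator callback that totals guest RAM, matching
 * the (host, offset, length, opaque) argument order of the loop above.
 * Usage: uint64_t total = 0; qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}
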
2755ec3f8c99SPeter Maydell #endif
2756