xref: /qemu/system/physmem.c (revision 1f6245e5aba94ff7acd34f8514da7dfb9712935d)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20777872e5SStefan Weil #ifndef _WIN32
21a98d49b1Sbellard #include <sys/types.h>
22d5a8f07cSbellard #include <sys/mman.h>
23d5a8f07cSbellard #endif
2454936004Sbellard 
25055403b2SStefan Weil #include "qemu-common.h"
266180a181Sbellard #include "cpu.h"
27b67d9a52Sbellard #include "tcg.h"
28b3c7724cSpbrook #include "hw/hw.h"
29cc9e98cbSAlex Williamson #include "hw/qdev.h"
301de7afc9SPaolo Bonzini #include "qemu/osdep.h"
319c17d615SPaolo Bonzini #include "sysemu/kvm.h"
322ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
330d09e41aSPaolo Bonzini #include "hw/xen/xen.h"
341de7afc9SPaolo Bonzini #include "qemu/timer.h"
351de7afc9SPaolo Bonzini #include "qemu/config-file.h"
3675a34036SAndreas Färber #include "qemu/error-report.h"
37022c62cbSPaolo Bonzini #include "exec/memory.h"
389c17d615SPaolo Bonzini #include "sysemu/dma.h"
39022c62cbSPaolo Bonzini #include "exec/address-spaces.h"
4053a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4153a5960aSpbrook #include <qemu.h>
42432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
439c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
446506e4f9SStefano Stabellini #include "trace.h"
4553a5960aSpbrook #endif
460d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
4754936004Sbellard 
48022c62cbSPaolo Bonzini #include "exec/cputlb.h"
495b6dd868SBlue Swirl #include "translate-all.h"
500cac1b66SBlue Swirl 
51022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
52220c3ebdSJuan Quintela #include "exec/ram_addr.h"
5367d95c15SAvi Kivity 
54b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
55b35ba30fSMichael S. Tsirkin 
56db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
571196be37Sths 
5899773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
59981fdf23SJuan Quintela static bool in_migration;
6094a6b54fSpbrook 
61a3161038SPaolo Bonzini RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
6262152b8aSAvi Kivity 
6362152b8aSAvi Kivity static MemoryRegion *system_memory;
64309cb471SAvi Kivity static MemoryRegion *system_io;
6562152b8aSAvi Kivity 
66f6790af6SAvi Kivity AddressSpace address_space_io;
67f6790af6SAvi Kivity AddressSpace address_space_memory;
682673a5daSAvi Kivity 
690844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
70acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
710e0df1e2SAvi Kivity 
727bd4f430SPaolo Bonzini /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
737bd4f430SPaolo Bonzini #define RAM_PREALLOC   (1 << 0)
747bd4f430SPaolo Bonzini 
75dbcb8981SPaolo Bonzini /* RAM is mmap-ed with MAP_SHARED */
76dbcb8981SPaolo Bonzini #define RAM_SHARED     (1 << 1)
77dbcb8981SPaolo Bonzini 
78e2eef170Spbrook #endif
799fa3e853Sbellard 
80bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
816a00d601Sbellard /* current CPU in the current thread. It is only valid inside
826a00d601Sbellard    cpu_exec() */
834917cf44SAndreas Färber DEFINE_TLS(CPUState *, current_cpu);
842e70f6efSpbrook /* 0 = Do not count executed instructions.
85bf20dc07Sths    1 = Precise instruction counting.
862e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
875708fc66SPaolo Bonzini int use_icount;
886a00d601Sbellard 
89e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
904346ae3eSAvi Kivity 
typedef struct PhysPageEntry PhysPageEntry;

/* One entry of the physical-address radix tree.  Packed into 32 bits so a
 * whole Node (P_L2_SIZE entries) stays cache-friendly. */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
991db8abb1SPaolo Bonzini 
1008b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
1018b795765SMichael S. Tsirkin 
10203f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
10357271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
10403f49957SPaolo Bonzini 
105026736ceSMichael S. Tsirkin #define P_L2_BITS 9
10603f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
10703f49957SPaolo Bonzini 
10803f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
10903f49957SPaolo Bonzini 
11003f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1110475d94fSPaolo Bonzini 
/* Backing storage for one AddressSpaceDispatch radix tree: a growable pool
 * of interior nodes plus the table of leaf MemoryRegionSections that the
 * tree's leaf entries index into. */
typedef struct PhysPageMap {
    unsigned sections_nb;        /* sections currently in use */
    unsigned sections_nb_alloc;  /* capacity of the sections array */
    unsigned nodes_nb;           /* interior nodes currently in use */
    unsigned nodes_nb_alloc;     /* capacity of the nodes array */
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
12053cb28cbSMarcel Apfelbaum 
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;   /* root of the radix tree */
    PhysPageMap map;          /* node/section storage for the tree */
    AddressSpace *as;         /* the address space this dispatch serves */
};
1291db8abb1SPaolo Bonzini 
/* Byte offset of @addr within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

/* A page that is carved up into regions smaller than TARGET_PAGE_SIZE.
 * Each byte of the page maps to a section index via sub_section[]. */
typedef struct subpage_t {
    MemoryRegion iomem;      /* the MemoryRegion standing in for the page */
    AddressSpace *as;
    hwaddr base;             /* page-aligned base address of the subpage */
    uint16_t sub_section[TARGET_PAGE_SIZE];  /* per-byte section indices */
} subpage_t;
13790260c6cSJan Kiszka 
138b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
139b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
140b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
141b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1425312bd8bSAvi Kivity 
143e2eef170Spbrook static void io_mem_init(void);
14462152b8aSAvi Kivity static void memory_map_init(void);
14509daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
146e2eef170Spbrook 
1471ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
1486658ffb8Spbrook #endif
14954936004Sbellard 
1506d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
151d6f2ea22SAvi Kivity 
15253cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
153f7bf5461SAvi Kivity {
15453cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
15553cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
15653cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
15753cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
158f7bf5461SAvi Kivity     }
159f7bf5461SAvi Kivity }
160f7bf5461SAvi Kivity 
16153cb28cbSMarcel Apfelbaum static uint32_t phys_map_node_alloc(PhysPageMap *map)
162d6f2ea22SAvi Kivity {
163d6f2ea22SAvi Kivity     unsigned i;
1648b795765SMichael S. Tsirkin     uint32_t ret;
165d6f2ea22SAvi Kivity 
16653cb28cbSMarcel Apfelbaum     ret = map->nodes_nb++;
167d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
16853cb28cbSMarcel Apfelbaum     assert(ret != map->nodes_nb_alloc);
16903f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
17053cb28cbSMarcel Apfelbaum         map->nodes[ret][i].skip = 1;
17153cb28cbSMarcel Apfelbaum         map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
172d6f2ea22SAvi Kivity     }
173f7bf5461SAvi Kivity     return ret;
174d6f2ea22SAvi Kivity }
175d6f2ea22SAvi Kivity 
/* Recursive worker for phys_page_set(): point the [*index, *index + *nb)
 * page range at leaf section @leaf, materializing interior nodes on demand.
 * @level counts down from P_L2_LEVELS - 1; *index and *nb are advanced as
 * pages are consumed so the caller's iteration state is shared across the
 * recursion. */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    /* Number of pages covered by one entry at this level. */
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* Entry is an unpopulated skip: allocate its child node now. */
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            /* Bottom level: entries are leaves, default to "unassigned". */
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    /* First child entry covering *index at this level. */
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Range covers this whole entry: make it a direct leaf. */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: recurse into the next level down. */
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
2105cd2c5b6SRichard Henderson 
211ac1970fbSAvi Kivity static void phys_page_set(AddressSpaceDispatch *d,
212a8170e5eSAvi Kivity                           hwaddr index, hwaddr nb,
2132999097bSAvi Kivity                           uint16_t leaf)
214f7bf5461SAvi Kivity {
2152999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
21653cb28cbSMarcel Apfelbaum     phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
217f7bf5461SAvi Kivity 
21853cb28cbSMarcel Apfelbaum     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
21992e873b9Sbellard }
22092e873b9Sbellard 
221b35ba30fSMichael S. Tsirkin /* Compact a non leaf page entry. Simply detect that the entry has a single child,
222b35ba30fSMichael S. Tsirkin  * and update our entry so we can skip it and go directly to the destination.
223b35ba30fSMichael S. Tsirkin  */
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 * NOTE(review): @compacted is threaded through the recursion but never read
 * or written here — looks vestigial; confirm before relying on it. */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;   /* index of the last non-NIL child */
    int valid = 0;                    /* number of non-NIL children */
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            /* Interior child: compact its subtree first (bottom-up). */
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have.
     * NOTE(review): bound is (1 << 3) although skip is a 6-bit field;
     * conservative, since accumulated skip never exceeds P_L2_LEVELS. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    /* Bypass the single-child node: point directly at the grandchild. */
    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
273b35ba30fSMichael S. Tsirkin 
274b35ba30fSMichael S. Tsirkin static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275b35ba30fSMichael S. Tsirkin {
276b35ba30fSMichael S. Tsirkin     DECLARE_BITMAP(compacted, nodes_nb);
277b35ba30fSMichael S. Tsirkin 
278b35ba30fSMichael S. Tsirkin     if (d->phys_map.skip) {
27953cb28cbSMarcel Apfelbaum         phys_page_compact(&d->phys_map, d->map.nodes, compacted);
280b35ba30fSMichael S. Tsirkin     }
281b35ba30fSMichael S. Tsirkin }
282b35ba30fSMichael S. Tsirkin 
/* Walk the radix tree rooted at @lp and return the MemoryRegionSection
 * covering physical address @addr, or the "unassigned" section if none.
 * Honors compacted entries: each step descends lp.skip levels at once. */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* i tracks the remaining tree depth; a leaf has skip == 0. */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* A compacted leaf may cover less than its nominal range: double-check
     * that the section really contains @addr (a 128-bit size with a nonzero
     * high word trivially covers any address). */
    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
306f3705d53SAvi Kivity 
307e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
308e5548617SBlue Swirl {
3092a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
310e5548617SBlue Swirl         && mr != &io_mem_watch;
311e5548617SBlue Swirl }
312149f54b5SPaolo Bonzini 
313c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
31490260c6cSJan Kiszka                                                         hwaddr addr,
31590260c6cSJan Kiszka                                                         bool resolve_subpage)
3169f029603SJan Kiszka {
31790260c6cSJan Kiszka     MemoryRegionSection *section;
31890260c6cSJan Kiszka     subpage_t *subpage;
31990260c6cSJan Kiszka 
32053cb28cbSMarcel Apfelbaum     section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
32190260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
32290260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
32353cb28cbSMarcel Apfelbaum         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
32490260c6cSJan Kiszka     }
32590260c6cSJan Kiszka     return section;
3269f029603SJan Kiszka }
3279f029603SJan Kiszka 
32890260c6cSJan Kiszka static MemoryRegionSection *
329c7086b4aSPaolo Bonzini address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
33090260c6cSJan Kiszka                                  hwaddr *plen, bool resolve_subpage)
331149f54b5SPaolo Bonzini {
332149f54b5SPaolo Bonzini     MemoryRegionSection *section;
333a87f3954SPaolo Bonzini     Int128 diff;
334149f54b5SPaolo Bonzini 
335c7086b4aSPaolo Bonzini     section = address_space_lookup_region(d, addr, resolve_subpage);
336149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegionSection */
337149f54b5SPaolo Bonzini     addr -= section->offset_within_address_space;
338149f54b5SPaolo Bonzini 
339149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegion */
340149f54b5SPaolo Bonzini     *xlat = addr + section->offset_within_region;
341149f54b5SPaolo Bonzini 
342149f54b5SPaolo Bonzini     diff = int128_sub(section->mr->size, int128_make64(addr));
3433752a036SPeter Maydell     *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
344149f54b5SPaolo Bonzini     return section;
345149f54b5SPaolo Bonzini }
34690260c6cSJan Kiszka 
347a87f3954SPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348a87f3954SPaolo Bonzini {
349a87f3954SPaolo Bonzini     if (memory_region_is_ram(mr)) {
350a87f3954SPaolo Bonzini         return !(is_write && mr->readonly);
351a87f3954SPaolo Bonzini     }
352a87f3954SPaolo Bonzini     if (memory_region_is_romd(mr)) {
353a87f3954SPaolo Bonzini         return !is_write;
354a87f3954SPaolo Bonzini     }
355a87f3954SPaolo Bonzini 
356a87f3954SPaolo Bonzini     return false;
357a87f3954SPaolo Bonzini }
358a87f3954SPaolo Bonzini 
/* Translate @addr in @as for an access of up to *plen bytes, following any
 * chain of IOMMUs to the terminal MemoryRegion.  On return *xlat is the
 * offset within the returned region and *plen is clamped to what can be
 * accessed contiguously (and to permissions along the IOMMU path). */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        /* Non-IOMMU region: translation is complete. */
        if (!mr->iommu_ops) {
            break;
        }

        /* Rewrite @addr through the IOMMU and continue in its target AS. */
        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            /* Access not permitted by this IOMMU entry. */
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    /* Under Xen, direct-map accesses must not cross a page boundary. */
    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
39790260c6cSJan Kiszka 
39890260c6cSJan Kiszka MemoryRegionSection *
39990260c6cSJan Kiszka address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
40090260c6cSJan Kiszka                                   hwaddr *plen)
40190260c6cSJan Kiszka {
40230951157SAvi Kivity     MemoryRegionSection *section;
403c7086b4aSPaolo Bonzini     section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
40430951157SAvi Kivity 
40530951157SAvi Kivity     assert(!section->mr->iommu_ops);
40630951157SAvi Kivity     return section;
40790260c6cSJan Kiszka }
4089fa3e853Sbellard #endif
409fd6ce8f6Sbellard 
/* One-time process-wide initialization of the execution core.  For system
 * emulation this sets up the RAM list lock, the flat memory/IO address
 * spaces and the internal I/O regions; user-mode emulation has no machine
 * memory model, so nothing to do there. */
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
418d5ab9713SJan Kiszka 
419b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4209656f324Spbrook 
/* .post_load hook of vmstate_cpu_common: fix up state coming from an
 * incoming migration stream and invalidate the software TLB, which is
 * never migrated.  Always succeeds. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}
432e7f4eff7SJuan Quintela 
/* Migration description of the architecture-independent CPUState fields:
 * only the halt state and the pending interrupt mask travel in the
 * "cpu_common" section; everything else is per-target. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
4441a1562f5SAndreas Färber 
4459656f324Spbrook #endif
4469656f324Spbrook 
44738d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
448950f1472SGlauber Costa {
449bdc44640SAndreas Färber     CPUState *cpu;
450950f1472SGlauber Costa 
451bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
45255e5c285SAndreas Färber         if (cpu->cpu_index == index) {
453bdc44640SAndreas Färber             return cpu;
45455e5c285SAndreas Färber         }
455950f1472SGlauber Costa     }
456950f1472SGlauber Costa 
457bdc44640SAndreas Färber     return NULL;
458950f1472SGlauber Costa }
459950f1472SGlauber Costa 
46009daed84SEdgar E. Iglesias #if !defined(CONFIG_USER_ONLY)
46109daed84SEdgar E. Iglesias void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
46209daed84SEdgar E. Iglesias {
46309daed84SEdgar E. Iglesias     /* We only support one address space per cpu at the moment.  */
46409daed84SEdgar E. Iglesias     assert(cpu->as == as);
46509daed84SEdgar E. Iglesias 
46609daed84SEdgar E. Iglesias     if (cpu->tcg_as_listener) {
46709daed84SEdgar E. Iglesias         memory_listener_unregister(cpu->tcg_as_listener);
46809daed84SEdgar E. Iglesias     } else {
46909daed84SEdgar E. Iglesias         cpu->tcg_as_listener = g_new0(MemoryListener, 1);
47009daed84SEdgar E. Iglesias     }
47109daed84SEdgar E. Iglesias     cpu->tcg_as_listener->commit = tcg_commit;
47209daed84SEdgar E. Iglesias     memory_listener_register(cpu->tcg_as_listener, as);
47309daed84SEdgar E. Iglesias }
47409daed84SEdgar E. Iglesias #endif
47509daed84SEdgar E. Iglesias 
/* Register a freshly created CPU with the global CPU list: assign it the
 * next cpu_index, initialize its break/watchpoint queues, and hook it up
 * to migration/savevm.  NOTE(review): the index assignment is protected by
 * cpu_list_lock only in user mode — presumably system emulation creates
 * CPUs from a single thread; confirm before calling concurrently. */
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* cpu_index = number of CPUs already registered. */
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    /* Register common state unless the device model already provides a
     * vmsd (in which case qdev handles registration). */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Legacy savevm path for targets still using cpu_save/cpu_load;
     * mutually exclusive with the vmsd mechanisms below. */
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
515fd6ce8f6Sbellard 
5161fddef4bSbellard #if defined(TARGET_HAS_ICE)
51794df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode: throw away any translated block containing @pc so the new
 * breakpoint is seen on the next execution. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
52294df27fdSPaul Brook #else
/* System mode: translate the virtual @pc through the CPU's MMU (debug
 * walk) and invalidate translated code at the resulting physical address.
 * If @pc is currently unmapped (-1) there is nothing to invalidate. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
531c27004ecSbellard #endif
53294df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
533d720b93dSbellard 
534c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
53575a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
536c527ee8fSPaul Brook 
537c527ee8fSPaul Brook {
538c527ee8fSPaul Brook }
539c527ee8fSPaul Brook 
54075a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
541c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
542c527ee8fSPaul Brook {
543c527ee8fSPaul Brook     return -ENOSYS;
544c527ee8fSPaul Brook }
545c527ee8fSPaul Brook #else
5466658ffb8Spbrook /* Add a watchpoint.  */
54775a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
548a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
5496658ffb8Spbrook {
55075a34036SAndreas Färber     vaddr len_mask = ~(len - 1);
551c0ce998eSaliguori     CPUWatchpoint *wp;
5526658ffb8Spbrook 
553b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
5540dc23828SMax Filippov     if ((len & (len - 1)) || (addr & ~len_mask) ||
5550dc23828SMax Filippov             len == 0 || len > TARGET_PAGE_SIZE) {
55675a34036SAndreas Färber         error_report("tried to set invalid watchpoint at %"
55775a34036SAndreas Färber                      VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
558b4051334Saliguori         return -EINVAL;
559b4051334Saliguori     }
5607267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
5616658ffb8Spbrook 
562a1d1bb31Saliguori     wp->vaddr = addr;
563b4051334Saliguori     wp->len_mask = len_mask;
564a1d1bb31Saliguori     wp->flags = flags;
565a1d1bb31Saliguori 
5662dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
567ff4700b0SAndreas Färber     if (flags & BP_GDB) {
568ff4700b0SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
569ff4700b0SAndreas Färber     } else {
570ff4700b0SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
571ff4700b0SAndreas Färber     }
572a1d1bb31Saliguori 
57331b030d4SAndreas Färber     tlb_flush_page(cpu, addr);
574a1d1bb31Saliguori 
575a1d1bb31Saliguori     if (watchpoint)
576a1d1bb31Saliguori         *watchpoint = wp;
577a1d1bb31Saliguori     return 0;
5786658ffb8Spbrook }
5796658ffb8Spbrook 
580a1d1bb31Saliguori /* Remove a specific watchpoint.  */
58175a34036SAndreas Färber int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
582a1d1bb31Saliguori                           int flags)
5836658ffb8Spbrook {
58475a34036SAndreas Färber     vaddr len_mask = ~(len - 1);
585a1d1bb31Saliguori     CPUWatchpoint *wp;
5866658ffb8Spbrook 
587ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
588b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
5896e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
59075a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
5916658ffb8Spbrook             return 0;
5926658ffb8Spbrook         }
5936658ffb8Spbrook     }
594a1d1bb31Saliguori     return -ENOENT;
5956658ffb8Spbrook }
5966658ffb8Spbrook 
597a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
59875a34036SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
599a1d1bb31Saliguori {
600ff4700b0SAndreas Färber     QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
6017d03f82fSedgar_igl 
60231b030d4SAndreas Färber     tlb_flush_page(cpu, watchpoint->vaddr);
603a1d1bb31Saliguori 
6047267c094SAnthony Liguori     g_free(watchpoint);
6057d03f82fSedgar_igl }
6067d03f82fSedgar_igl 
607a1d1bb31Saliguori /* Remove all matching watchpoints.  */
60875a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
609a1d1bb31Saliguori {
610c0ce998eSaliguori     CPUWatchpoint *wp, *next;
611a1d1bb31Saliguori 
612ff4700b0SAndreas Färber     QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
61375a34036SAndreas Färber         if (wp->flags & mask) {
61475a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
61575a34036SAndreas Färber         }
616a1d1bb31Saliguori     }
617c0ce998eSaliguori }
618c527ee8fSPaul Brook #endif
619a1d1bb31Saliguori 
620a1d1bb31Saliguori /* Add a breakpoint.  */
621b3310ab3SAndreas Färber int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
622a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
6234c3a88a2Sbellard {
6241fddef4bSbellard #if defined(TARGET_HAS_ICE)
625c0ce998eSaliguori     CPUBreakpoint *bp;
6264c3a88a2Sbellard 
6277267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
6284c3a88a2Sbellard 
629a1d1bb31Saliguori     bp->pc = pc;
630a1d1bb31Saliguori     bp->flags = flags;
631a1d1bb31Saliguori 
6322dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
63300b941e5SAndreas Färber     if (flags & BP_GDB) {
634f0c3c505SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
63500b941e5SAndreas Färber     } else {
636f0c3c505SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
63700b941e5SAndreas Färber     }
638d720b93dSbellard 
639f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, pc);
640a1d1bb31Saliguori 
64100b941e5SAndreas Färber     if (breakpoint) {
642a1d1bb31Saliguori         *breakpoint = bp;
64300b941e5SAndreas Färber     }
6444c3a88a2Sbellard     return 0;
6454c3a88a2Sbellard #else
646a1d1bb31Saliguori     return -ENOSYS;
6474c3a88a2Sbellard #endif
6484c3a88a2Sbellard }
6494c3a88a2Sbellard 
650a1d1bb31Saliguori /* Remove a specific breakpoint.  */
651b3310ab3SAndreas Färber int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
652a1d1bb31Saliguori {
6537d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
654a1d1bb31Saliguori     CPUBreakpoint *bp;
655a1d1bb31Saliguori 
656f0c3c505SAndreas Färber     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
657a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
658b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
659a1d1bb31Saliguori             return 0;
6607d03f82fSedgar_igl         }
661a1d1bb31Saliguori     }
662a1d1bb31Saliguori     return -ENOENT;
663a1d1bb31Saliguori #else
664a1d1bb31Saliguori     return -ENOSYS;
6657d03f82fSedgar_igl #endif
6667d03f82fSedgar_igl }
6677d03f82fSedgar_igl 
668a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
669b3310ab3SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
6704c3a88a2Sbellard {
6711fddef4bSbellard #if defined(TARGET_HAS_ICE)
672f0c3c505SAndreas Färber     QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
673f0c3c505SAndreas Färber 
674f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, breakpoint->pc);
675a1d1bb31Saliguori 
6767267c094SAnthony Liguori     g_free(breakpoint);
677a1d1bb31Saliguori #endif
678a1d1bb31Saliguori }
679a1d1bb31Saliguori 
680a1d1bb31Saliguori /* Remove all matching breakpoints. */
681b3310ab3SAndreas Färber void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
682a1d1bb31Saliguori {
683a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
684c0ce998eSaliguori     CPUBreakpoint *bp, *next;
685a1d1bb31Saliguori 
686f0c3c505SAndreas Färber     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
687b3310ab3SAndreas Färber         if (bp->flags & mask) {
688b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
689b3310ab3SAndreas Färber         }
690c0ce998eSaliguori     }
6914c3a88a2Sbellard #endif
6924c3a88a2Sbellard }
6934c3a88a2Sbellard 
694c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
695c33a346eSbellard    CPU loop after each instruction */
6963825b28fSAndreas Färber void cpu_single_step(CPUState *cpu, int enabled)
697c33a346eSbellard {
6981fddef4bSbellard #if defined(TARGET_HAS_ICE)
699ed2803daSAndreas Färber     if (cpu->singlestep_enabled != enabled) {
700ed2803daSAndreas Färber         cpu->singlestep_enabled = enabled;
701ed2803daSAndreas Färber         if (kvm_enabled()) {
70238e478ecSStefan Weil             kvm_update_guest_debug(cpu, 0);
703ed2803daSAndreas Färber         } else {
704ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
7059fa3e853Sbellard             /* XXX: only flush what is necessary */
70638e478ecSStefan Weil             CPUArchState *env = cpu->env_ptr;
7070124311eSbellard             tb_flush(env);
708c33a346eSbellard         }
709e22a25c9Saliguori     }
710c33a346eSbellard #endif
711c33a346eSbellard }
712c33a346eSbellard 
/* Print a fatal error message (printf-style @fmt) plus a CPU state dump
 * to stderr and, when enabled, to the qemu log, then abort().
 * Never returns.  */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* The argument list is consumed twice (once for stderr, once for the
     * log), so a copy is needed before the first vfprintf walks it.  */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        /* Flush and close so the log is complete on disk before abort().  */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition first — NOTE(review):
         * presumably so the abort() below terminates the process rather
         * than entering an emulation-installed handler; confirm.  */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
7447501267eSbellard 
7450124311eSbellard #if !defined(CONFIG_USER_ONLY)
746041603feSPaolo Bonzini static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
747041603feSPaolo Bonzini {
748041603feSPaolo Bonzini     RAMBlock *block;
749041603feSPaolo Bonzini 
750041603feSPaolo Bonzini     /* The list is protected by the iothread lock here.  */
751041603feSPaolo Bonzini     block = ram_list.mru_block;
752041603feSPaolo Bonzini     if (block && addr - block->offset < block->length) {
753041603feSPaolo Bonzini         goto found;
754041603feSPaolo Bonzini     }
755041603feSPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
756041603feSPaolo Bonzini         if (addr - block->offset < block->length) {
757041603feSPaolo Bonzini             goto found;
758041603feSPaolo Bonzini         }
759041603feSPaolo Bonzini     }
760041603feSPaolo Bonzini 
761041603feSPaolo Bonzini     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
762041603feSPaolo Bonzini     abort();
763041603feSPaolo Bonzini 
764041603feSPaolo Bonzini found:
765041603feSPaolo Bonzini     ram_list.mru_block = block;
766041603feSPaolo Bonzini     return block;
767041603feSPaolo Bonzini }
768041603feSPaolo Bonzini 
769a2f4d5beSJuan Quintela static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
7701ccde1cbSbellard {
771041603feSPaolo Bonzini     ram_addr_t start1;
772a2f4d5beSJuan Quintela     RAMBlock *block;
773a2f4d5beSJuan Quintela     ram_addr_t end;
774a2f4d5beSJuan Quintela 
775a2f4d5beSJuan Quintela     end = TARGET_PAGE_ALIGN(start + length);
776a2f4d5beSJuan Quintela     start &= TARGET_PAGE_MASK;
777f23db169Sbellard 
778041603feSPaolo Bonzini     block = qemu_get_ram_block(start);
779041603feSPaolo Bonzini     assert(block == qemu_get_ram_block(end - 1));
780041603feSPaolo Bonzini     start1 = (uintptr_t)block->host + (start - block->offset);
781e5548617SBlue Swirl     cpu_tlb_reset_dirty_all(start1, length);
782d24981d3SJuan Quintela }
783d24981d3SJuan Quintela 
784d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
785a2f4d5beSJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
78652159192SJuan Quintela                                      unsigned client)
787d24981d3SJuan Quintela {
788d24981d3SJuan Quintela     if (length == 0)
789d24981d3SJuan Quintela         return;
790ace694ccSJuan Quintela     cpu_physical_memory_clear_dirty_range(start, length, client);
791d24981d3SJuan Quintela 
792d24981d3SJuan Quintela     if (tcg_enabled()) {
793a2f4d5beSJuan Quintela         tlb_reset_dirty_range_all(start, length);
794d24981d3SJuan Quintela     }
7951ccde1cbSbellard }
7961ccde1cbSbellard 
/* Record whether dirty-page tracking is active (the flag name suggests
 * it is driven by migration — confirm against callers).  */
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
80174576198Saliguori 
/* Compute the iotlb value for a TLB entry mapping @vaddr to @paddr.
 *
 * For RAM the result is the page's ram_addr ORed with a special section
 * index (notdirty or ROM) in the sub-page bits; for MMIO it is the index
 * of the MemoryRegionSection in the dispatch map plus the offset @xlat.
 * Pages covered by a watchpoint are redirected to the watch section and
 * marked TLB_MMIO so every access traps.  */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        /* MMIO: encode the section's index in the dispatch map.  */
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
8419fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
84233417e70Sbellard 
843e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
8448da3ff18Spbrook 
845c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8465312bd8bSAvi Kivity                              uint16_t section);
847acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
84854688b1eSAvi Kivity 
849575ddeb4SStefan Weil static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
85091138037SMarkus Armbruster 
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    /* Replaces the default allocator, qemu_anon_ram_alloc.  */
    phys_mem_alloc = alloc;
}
86091138037SMarkus Armbruster 
86153cb28cbSMarcel Apfelbaum static uint16_t phys_section_add(PhysPageMap *map,
86253cb28cbSMarcel Apfelbaum                                  MemoryRegionSection *section)
8635312bd8bSAvi Kivity {
86468f3f65bSPaolo Bonzini     /* The physical section number is ORed with a page-aligned
86568f3f65bSPaolo Bonzini      * pointer to produce the iotlb entries.  Thus it should
86668f3f65bSPaolo Bonzini      * never overflow into the page-aligned value.
86768f3f65bSPaolo Bonzini      */
86853cb28cbSMarcel Apfelbaum     assert(map->sections_nb < TARGET_PAGE_SIZE);
86968f3f65bSPaolo Bonzini 
87053cb28cbSMarcel Apfelbaum     if (map->sections_nb == map->sections_nb_alloc) {
87153cb28cbSMarcel Apfelbaum         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
87253cb28cbSMarcel Apfelbaum         map->sections = g_renew(MemoryRegionSection, map->sections,
87353cb28cbSMarcel Apfelbaum                                 map->sections_nb_alloc);
8745312bd8bSAvi Kivity     }
87553cb28cbSMarcel Apfelbaum     map->sections[map->sections_nb] = *section;
876dfde4e6eSPaolo Bonzini     memory_region_ref(section->mr);
87753cb28cbSMarcel Apfelbaum     return map->sections_nb++;
8785312bd8bSAvi Kivity }
8795312bd8bSAvi Kivity 
880058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
881058bc4b5SPaolo Bonzini {
882dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
883dfde4e6eSPaolo Bonzini 
884058bc4b5SPaolo Bonzini     if (mr->subpage) {
885058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
886b4fefef9SPeter Crosthwaite         object_unref(OBJECT(&subpage->iomem));
887058bc4b5SPaolo Bonzini         g_free(subpage);
888058bc4b5SPaolo Bonzini     }
889058bc4b5SPaolo Bonzini }
890058bc4b5SPaolo Bonzini 
8916092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
8925312bd8bSAvi Kivity {
8939affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
8949affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
895058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
896058bc4b5SPaolo Bonzini     }
8979affd6fcSPaolo Bonzini     g_free(map->sections);
8989affd6fcSPaolo Bonzini     g_free(map->nodes);
8995312bd8bSAvi Kivity }
9005312bd8bSAvi Kivity 
/* Install @section, which covers less than a full target page, into the
 * dispatch tree via a subpage_t that demultiplexes accesses at sub-page
 * granularity.  */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    /* Whatever currently backs this page: an existing subpage or the
     * catch-all io_mem_unassigned region (asserted below).  */
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First sub-page mapping on this page: create the subpage
         * container and point the page's dispatch entry at it.  */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register the section over its (inclusive) in-page byte range.  */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
9300f0cb164SAvi Kivity 
9310f0cb164SAvi Kivity 
932052e87b0SPaolo Bonzini static void register_multipage(AddressSpaceDispatch *d,
933052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
93433417e70Sbellard {
935a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
93653cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
937052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
938052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
939dd81124bSAvi Kivity 
940733d5ef5SPaolo Bonzini     assert(num_pages);
941733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
94233417e70Sbellard }
94333417e70Sbellard 
/* MemoryListener callback: add @section to the address space's
 * next-generation dispatch table, splitting it into page-aligned
 * multipage runs plus sub-page head/tail fragments.  */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    /* "now" is the chunk registered this iteration; "remain" tracks the
     * yet-unregistered tail of the section.  */
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned head: register the fragment up to the next page
         * boundary (or the whole section, if smaller) as a subpage.  */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Loop until remain.size equals the size just consumed, i.e. the
     * final chunk has been registered.  */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Sub-page tail.  */
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            /* Not page-aligned: carve one page-sized subpage chunk.  */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Aligned: register the largest whole-page-multiple run.  */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
9760f0cb164SAvi Kivity 
/* Drain any MMIO writes batched in KVM's coalesced-MMIO buffer.
 * No-op when KVM is not in use.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    /* Braces added: the unbraced if violated the brace style used
     * everywhere else in this file.  */
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
98262a2744cSSheng Yang 
/* Acquire the mutex protecting the global RAM block list.  */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
987b2a8658eSUmesh Deshpande 
/* Release the mutex protecting the global RAM block list.  */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
992b2a8658eSUmesh Deshpande 
993e1e84ba0SMarkus Armbruster #ifdef __linux__
994c902760fSMarcelo Tosatti 
995c902760fSMarcelo Tosatti #include <sys/vfs.h>
996c902760fSMarcelo Tosatti 
997c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
998c902760fSMarcelo Tosatti 
999c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
1000c902760fSMarcelo Tosatti {
1001c902760fSMarcelo Tosatti     struct statfs fs;
1002c902760fSMarcelo Tosatti     int ret;
1003c902760fSMarcelo Tosatti 
1004c902760fSMarcelo Tosatti     do {
1005c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
1006c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
1007c902760fSMarcelo Tosatti 
1008c902760fSMarcelo Tosatti     if (ret != 0) {
10096adc0549SMichael Tokarev         perror(path);
1010c902760fSMarcelo Tosatti         return 0;
1011c902760fSMarcelo Tosatti     }
1012c902760fSMarcelo Tosatti 
1013c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
1014c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1015c902760fSMarcelo Tosatti 
1016c902760fSMarcelo Tosatti     return fs.f_bsize;
1017c902760fSMarcelo Tosatti }
1018c902760fSMarcelo Tosatti 
101904b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
102004b16653SAlex Williamson                             ram_addr_t memory,
10217f56e740SPaolo Bonzini                             const char *path,
10227f56e740SPaolo Bonzini                             Error **errp)
1023c902760fSMarcelo Tosatti {
1024c902760fSMarcelo Tosatti     char *filename;
10258ca761f6SPeter Feiner     char *sanitized_name;
10268ca761f6SPeter Feiner     char *c;
1027c902760fSMarcelo Tosatti     void *area;
1028c902760fSMarcelo Tosatti     int fd;
1029c902760fSMarcelo Tosatti     unsigned long hpagesize;
1030c902760fSMarcelo Tosatti 
1031c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
1032c902760fSMarcelo Tosatti     if (!hpagesize) {
1033f9a49dfaSMarcelo Tosatti         goto error;
1034c902760fSMarcelo Tosatti     }
1035c902760fSMarcelo Tosatti 
1036c902760fSMarcelo Tosatti     if (memory < hpagesize) {
1037c902760fSMarcelo Tosatti         return NULL;
1038c902760fSMarcelo Tosatti     }
1039c902760fSMarcelo Tosatti 
1040c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
10417f56e740SPaolo Bonzini         error_setg(errp,
10427f56e740SPaolo Bonzini                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1043f9a49dfaSMarcelo Tosatti         goto error;
1044c902760fSMarcelo Tosatti     }
1045c902760fSMarcelo Tosatti 
10468ca761f6SPeter Feiner     /* Make name safe to use with mkstemp by replacing '/' with '_'. */
10478ca761f6SPeter Feiner     sanitized_name = g_strdup(block->mr->name);
10488ca761f6SPeter Feiner     for (c = sanitized_name; *c != '\0'; c++) {
10498ca761f6SPeter Feiner         if (*c == '/')
10508ca761f6SPeter Feiner             *c = '_';
10518ca761f6SPeter Feiner     }
10528ca761f6SPeter Feiner 
10538ca761f6SPeter Feiner     filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
10548ca761f6SPeter Feiner                                sanitized_name);
10558ca761f6SPeter Feiner     g_free(sanitized_name);
1056c902760fSMarcelo Tosatti 
1057c902760fSMarcelo Tosatti     fd = mkstemp(filename);
1058c902760fSMarcelo Tosatti     if (fd < 0) {
10597f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
10607f56e740SPaolo Bonzini                          "unable to create backing store for hugepages");
1061e4ada482SStefan Weil         g_free(filename);
1062f9a49dfaSMarcelo Tosatti         goto error;
1063c902760fSMarcelo Tosatti     }
1064c902760fSMarcelo Tosatti     unlink(filename);
1065e4ada482SStefan Weil     g_free(filename);
1066c902760fSMarcelo Tosatti 
1067c902760fSMarcelo Tosatti     memory = (memory+hpagesize-1) & ~(hpagesize-1);
1068c902760fSMarcelo Tosatti 
1069c902760fSMarcelo Tosatti     /*
1070c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
1071c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1072c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1073c902760fSMarcelo Tosatti      * mmap will fail.
1074c902760fSMarcelo Tosatti      */
10757f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1076c902760fSMarcelo Tosatti         perror("ftruncate");
10777f56e740SPaolo Bonzini     }
1078c902760fSMarcelo Tosatti 
1079dbcb8981SPaolo Bonzini     area = mmap(0, memory, PROT_READ | PROT_WRITE,
1080dbcb8981SPaolo Bonzini                 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1081dbcb8981SPaolo Bonzini                 fd, 0);
1082c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
10837f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
10847f56e740SPaolo Bonzini                          "unable to map backing store for hugepages");
1085c902760fSMarcelo Tosatti         close(fd);
1086f9a49dfaSMarcelo Tosatti         goto error;
1087c902760fSMarcelo Tosatti     }
1088ef36fa14SMarcelo Tosatti 
1089ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
109038183310SPaolo Bonzini         os_mem_prealloc(fd, area, memory);
1091ef36fa14SMarcelo Tosatti     }
1092ef36fa14SMarcelo Tosatti 
109304b16653SAlex Williamson     block->fd = fd;
1094c902760fSMarcelo Tosatti     return area;
1095f9a49dfaSMarcelo Tosatti 
1096f9a49dfaSMarcelo Tosatti error:
1097f9a49dfaSMarcelo Tosatti     if (mem_prealloc) {
1098f9a49dfaSMarcelo Tosatti         exit(1);
1099f9a49dfaSMarcelo Tosatti     }
1100f9a49dfaSMarcelo Tosatti     return NULL;
1101c902760fSMarcelo Tosatti }
1102c902760fSMarcelo Tosatti #endif
1103c902760fSMarcelo Tosatti 
1104d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1105d17b5288SAlex Williamson {
110604b16653SAlex Williamson     RAMBlock *block, *next_block;
11073e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
110804b16653SAlex Williamson 
110949cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
111049cd9ac6SStefan Hajnoczi 
1111a3161038SPaolo Bonzini     if (QTAILQ_EMPTY(&ram_list.blocks))
111204b16653SAlex Williamson         return 0;
111304b16653SAlex Williamson 
1114a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1115f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
111604b16653SAlex Williamson 
111704b16653SAlex Williamson         end = block->offset + block->length;
111804b16653SAlex Williamson 
1119a3161038SPaolo Bonzini         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
112004b16653SAlex Williamson             if (next_block->offset >= end) {
112104b16653SAlex Williamson                 next = MIN(next, next_block->offset);
112204b16653SAlex Williamson             }
112304b16653SAlex Williamson         }
112404b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
112504b16653SAlex Williamson             offset = end;
112604b16653SAlex Williamson             mingap = next - end;
112704b16653SAlex Williamson         }
112804b16653SAlex Williamson     }
11293e837b2cSAlex Williamson 
11303e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
11313e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
11323e837b2cSAlex Williamson                 (uint64_t)size);
11333e837b2cSAlex Williamson         abort();
11343e837b2cSAlex Williamson     }
11353e837b2cSAlex Williamson 
113604b16653SAlex Williamson     return offset;
113704b16653SAlex Williamson }
113804b16653SAlex Williamson 
1139652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
114004b16653SAlex Williamson {
1141d17b5288SAlex Williamson     RAMBlock *block;
1142d17b5288SAlex Williamson     ram_addr_t last = 0;
1143d17b5288SAlex Williamson 
1144a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1145d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
1146d17b5288SAlex Williamson 
1147d17b5288SAlex Williamson     return last;
1148d17b5288SAlex Williamson }
1149d17b5288SAlex Williamson 
1150ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1151ddb97f1dSJason Baron {
1152ddb97f1dSJason Baron     int ret;
1153ddb97f1dSJason Baron 
1154ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
11552ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
11562ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1157ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1158ddb97f1dSJason Baron         if (ret) {
1159ddb97f1dSJason Baron             perror("qemu_madvise");
1160ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1161ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1162ddb97f1dSJason Baron         }
1163ddb97f1dSJason Baron     }
1164ddb97f1dSJason Baron }
1165ddb97f1dSJason Baron 
116620cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
116784b89d78SCam Macdonell {
116820cfe881SHu Tao     RAMBlock *block;
116984b89d78SCam Macdonell 
1170a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1171c5705a77SAvi Kivity         if (block->offset == addr) {
117220cfe881SHu Tao             return block;
1173c5705a77SAvi Kivity         }
1174c5705a77SAvi Kivity     }
117520cfe881SHu Tao 
117620cfe881SHu Tao     return NULL;
117720cfe881SHu Tao }
117820cfe881SHu Tao 
117920cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
118020cfe881SHu Tao {
118120cfe881SHu Tao     RAMBlock *new_block = find_ram_block(addr);
118220cfe881SHu Tao     RAMBlock *block;
118320cfe881SHu Tao 
1184c5705a77SAvi Kivity     assert(new_block);
1185c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
118684b89d78SCam Macdonell 
118709e5ab63SAnthony Liguori     if (dev) {
118809e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
118984b89d78SCam Macdonell         if (id) {
119084b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
11917267c094SAnthony Liguori             g_free(id);
119284b89d78SCam Macdonell         }
119384b89d78SCam Macdonell     }
119484b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
119584b89d78SCam Macdonell 
1196b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1197b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1198a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1199c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
120084b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
120184b89d78SCam Macdonell                     new_block->idstr);
120284b89d78SCam Macdonell             abort();
120384b89d78SCam Macdonell         }
120484b89d78SCam Macdonell     }
1205b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1206c5705a77SAvi Kivity }
1207c5705a77SAvi Kivity 
120820cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
120920cfe881SHu Tao {
121020cfe881SHu Tao     RAMBlock *block = find_ram_block(addr);
121120cfe881SHu Tao 
121220cfe881SHu Tao     if (block) {
121320cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
121420cfe881SHu Tao     }
121520cfe881SHu Tao }
121620cfe881SHu Tao 
12178490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
12188490fc78SLuiz Capitulino {
12192ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
12208490fc78SLuiz Capitulino         /* disabled by the user */
12218490fc78SLuiz Capitulino         return 0;
12228490fc78SLuiz Capitulino     }
12238490fc78SLuiz Capitulino 
12248490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
12258490fc78SLuiz Capitulino }
12268490fc78SLuiz Capitulino 
/* Insert @new_block into the global RAM list and finish allocating it.
 *
 * Picks a free offset in the ram_addr_t space, allocates host memory if
 * the caller has not already supplied it (via Xen or phys_mem_alloc),
 * keeps the block list sorted from biggest to smallest, grows the dirty
 * bitmaps when the address space grew, and marks the new range dirty.
 *
 * Returns the ram_addr_t offset assigned to the block.  Exits the
 * process if host memory cannot be allocated.
 */
static ram_addr_t ram_block_add(RAMBlock *new_block)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            /* Xen provides the backing memory itself. */
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    /* List changed: invalidate the most-recently-used cache. */
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        /* Address space grew: extend every dirty bitmap with zeroes. */
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
       }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}
1290e9a1ab19Sbellard 
#ifdef __linux__
/* Allocate guest RAM for @mr of @size bytes backed by a file created
 * under @mem_path (e.g. hugetlbfs).  @share selects a MAP_SHARED
 * mapping instead of MAP_PRIVATE.
 *
 * Returns the new block's ram_addr_t offset, or -1 with @errp set.
 * Not supported under Xen or when an accelerator overrides
 * phys_mem_alloc (the file allocation path has no hook for that).
 */
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        /* file_ram_alloc() set @errp; give the block back. */
        g_free(new_block);
        return -1;
    }

    return ram_block_add(new_block);
}
#endif
1329e1c57ab8SPaolo Bonzini 
1330e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1331e1c57ab8SPaolo Bonzini                                    MemoryRegion *mr)
1332e1c57ab8SPaolo Bonzini {
1333e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1334e1c57ab8SPaolo Bonzini 
1335e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1336e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1337e1c57ab8SPaolo Bonzini     new_block->mr = mr;
1338e1c57ab8SPaolo Bonzini     new_block->length = size;
1339e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1340e1c57ab8SPaolo Bonzini     new_block->host = host;
1341e1c57ab8SPaolo Bonzini     if (host) {
13427bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1343e1c57ab8SPaolo Bonzini     }
1344e1c57ab8SPaolo Bonzini     return ram_block_add(new_block);
1345e1c57ab8SPaolo Bonzini }
1346e1c57ab8SPaolo Bonzini 
1347c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
13486977dfe6SYoshiaki Tamura {
1349c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
13506977dfe6SYoshiaki Tamura }
13516977dfe6SYoshiaki Tamura 
13521f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
13531f2e98b6SAlex Williamson {
13541f2e98b6SAlex Williamson     RAMBlock *block;
13551f2e98b6SAlex Williamson 
1356b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1357b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1358a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
13591f2e98b6SAlex Williamson         if (addr == block->offset) {
1360a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13610d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1362f798b07fSUmesh Deshpande             ram_list.version++;
13637267c094SAnthony Liguori             g_free(block);
1364b2a8658eSUmesh Deshpande             break;
13651f2e98b6SAlex Williamson         }
13661f2e98b6SAlex Williamson     }
1367b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
13681f2e98b6SAlex Williamson }
13691f2e98b6SAlex Williamson 
/* Free the RAM block whose offset is @addr: unlink it from the global
 * list and release the host memory via whichever mechanism originally
 * allocated it (preallocated, Xen map cache, file-backed mmap, or
 * anonymous). */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC) {
                /* caller owns the host buffer; nothing to release */
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                /* file-backed (e.g. -mem-path) mapping */
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}
1400e9a1ab19Sbellard 
#ifndef _WIN32
/* Throw away the host mapping of guest range [addr, addr+length) and
 * map fresh pages in its place (used e.g. to recover from a poisoned
 * page).  The new mapping must mirror how the block was originally
 * allocated.  Exits the process if the remap fails. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
                /* caller-provided memory cannot be remapped */
                ;
            } else if (xen_enabled()) {
                /* Xen-managed memory is never remapped this way. */
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    /* file-backed: re-map the same region of the file */
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
1451cd19cfa2SHuang Ying 
1452a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1453a35ba7beSPaolo Bonzini {
1454a35ba7beSPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
1455a35ba7beSPaolo Bonzini 
1456a35ba7beSPaolo Bonzini     return block->fd;
1457a35ba7beSPaolo Bonzini }
1458a35ba7beSPaolo Bonzini 
14593fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
14603fd74b84SDamjan Marion {
14613fd74b84SDamjan Marion     RAMBlock *block = qemu_get_ram_block(addr);
14623fd74b84SDamjan Marion 
14633fd74b84SDamjan Marion     return block->host;
14643fd74b84SDamjan Marion }
14653fd74b84SDamjan Marion 
14661b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
14671b5ec234SPaolo Bonzini    With the exception of the softmmu code in this file, this should
14681b5ec234SPaolo Bonzini    only be used for local memory (e.g. video ram) that the device owns,
14691b5ec234SPaolo Bonzini    and knows it isn't going to access beyond the end of the block.
14701b5ec234SPaolo Bonzini 
14711b5ec234SPaolo Bonzini    It should not be used for general purpose DMA.
14721b5ec234SPaolo Bonzini    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
14731b5ec234SPaolo Bonzini  */
14741b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
14751b5ec234SPaolo Bonzini {
14761b5ec234SPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
14771b5ec234SPaolo Bonzini 
1478868bb33fSJan Kiszka     if (xen_enabled()) {
1479432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1480432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1481712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1482432d268cSJun Nakajima          */
1483432d268cSJun Nakajima         if (block->offset == 0) {
1484e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1485432d268cSJun Nakajima         } else if (block->host == NULL) {
1486e41d7c69SJan Kiszka             block->host =
1487e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1488432d268cSJun Nakajima         }
1489432d268cSJun Nakajima     }
1490f471a17eSAlex Williamson     return block->host + (addr - block->offset);
149194a6b54fSpbrook }
1492f471a17eSAlex Williamson 
149338bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
149438bee5dcSStefano Stabellini  * but takes a size argument */
1495cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
149638bee5dcSStefano Stabellini {
14978ab934f9SStefano Stabellini     if (*size == 0) {
14988ab934f9SStefano Stabellini         return NULL;
14998ab934f9SStefano Stabellini     }
1500868bb33fSJan Kiszka     if (xen_enabled()) {
1501e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1502868bb33fSJan Kiszka     } else {
150338bee5dcSStefano Stabellini         RAMBlock *block;
150438bee5dcSStefano Stabellini 
1505a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
150638bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
150738bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
150838bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
150938bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
151038bee5dcSStefano Stabellini             }
151138bee5dcSStefano Stabellini         }
151238bee5dcSStefano Stabellini 
151338bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
151438bee5dcSStefano Stabellini         abort();
151538bee5dcSStefano Stabellini     }
151638bee5dcSStefano Stabellini }
151738bee5dcSStefano Stabellini 
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
/* Translate host pointer @ptr back to a ram_addr_t (stored in
 * *@ram_addr) and return the owning MemoryRegion, or NULL when the
 * pointer lies in no RAM block. */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        /* Under Xen, host pointers come from the map cache. */
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    /* Fast path: most lookups hit the most-recently-used block. */
    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* Skip blocks that are not mapped into this process. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
1551f471a17eSAlex Williamson 
/* Write handler for the "notdirty" region.  RAM writes are routed here
 * while a page's dirty bits are clean so that translated code touching
 * the page can be invalidated and the dirty bitmap updated; the write
 * itself then goes straight to RAM. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        /* The page may hold translated code: flush stale TBs first. */
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}
15801ccde1cbSbellard 
1581b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1582b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1583b018ddf6SPaolo Bonzini {
1584b018ddf6SPaolo Bonzini     return is_write;
1585b018ddf6SPaolo Bonzini }
1586b018ddf6SPaolo Bonzini 
/* MemoryRegionOps for pages whose code-dirty flag is clean: only writes
 * are accepted (see notdirty_mem_accepts); reads bypass this region. */
static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
15921ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.  */
/* @offset is the in-page offset of the access, @len_mask the access
 * length mask, @flags the access type (BP_MEM_READ/WRITE).  Called from
 * the watch_mem_* handlers with current_cpu set. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: report EXCP_DEBUG now. */
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Stop after: regenerate a single-insn TB and
                     * restart so the access completes first. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
16320f459d16Spbrook 
16336658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
16346658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
16356658ffb8Spbrook    phys routines.  */
1636a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
16371ec9b909SAvi Kivity                                unsigned size)
16386658ffb8Spbrook {
16391ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
16401ec9b909SAvi Kivity     switch (size) {
16412c17449bSEdgar E. Iglesias     case 1: return ldub_phys(&address_space_memory, addr);
164241701aa4SEdgar E. Iglesias     case 2: return lduw_phys(&address_space_memory, addr);
1643fdfba1a2SEdgar E. Iglesias     case 4: return ldl_phys(&address_space_memory, addr);
16441ec9b909SAvi Kivity     default: abort();
16451ec9b909SAvi Kivity     }
16466658ffb8Spbrook }
16476658ffb8Spbrook 
1648a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
16491ec9b909SAvi Kivity                             uint64_t val, unsigned size)
16506658ffb8Spbrook {
16511ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
16521ec9b909SAvi Kivity     switch (size) {
165367364150SMax Filippov     case 1:
1654db3be60dSEdgar E. Iglesias         stb_phys(&address_space_memory, addr, val);
165567364150SMax Filippov         break;
165667364150SMax Filippov     case 2:
16575ce5944dSEdgar E. Iglesias         stw_phys(&address_space_memory, addr, val);
165867364150SMax Filippov         break;
165967364150SMax Filippov     case 4:
1660ab1da857SEdgar E. Iglesias         stl_phys(&address_space_memory, addr, val);
166167364150SMax Filippov         break;
16621ec9b909SAvi Kivity     default: abort();
16631ec9b909SAvi Kivity     }
16646658ffb8Spbrook }
16656658ffb8Spbrook 
/* MemoryRegionOps interposed on pages that contain watchpoints. */
static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
16716658ffb8Spbrook 
1672a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
167370c68e44SAvi Kivity                              unsigned len)
1674db7b5426Sblueswir1 {
1675acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1676acc9d80bSJan Kiszka     uint8_t buf[4];
1677791af8c8SPaolo Bonzini 
1678db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1679016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1680acc9d80bSJan Kiszka            subpage, len, addr);
1681db7b5426Sblueswir1 #endif
1682acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1683acc9d80bSJan Kiszka     switch (len) {
1684acc9d80bSJan Kiszka     case 1:
1685acc9d80bSJan Kiszka         return ldub_p(buf);
1686acc9d80bSJan Kiszka     case 2:
1687acc9d80bSJan Kiszka         return lduw_p(buf);
1688acc9d80bSJan Kiszka     case 4:
1689acc9d80bSJan Kiszka         return ldl_p(buf);
1690acc9d80bSJan Kiszka     default:
1691acc9d80bSJan Kiszka         abort();
1692acc9d80bSJan Kiszka     }
1693db7b5426Sblueswir1 }
1694db7b5426Sblueswir1 
1695a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
169670c68e44SAvi Kivity                           uint64_t value, unsigned len)
1697db7b5426Sblueswir1 {
1698acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1699acc9d80bSJan Kiszka     uint8_t buf[4];
1700acc9d80bSJan Kiszka 
1701db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1702016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1703acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1704acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1705db7b5426Sblueswir1 #endif
1706acc9d80bSJan Kiszka     switch (len) {
1707acc9d80bSJan Kiszka     case 1:
1708acc9d80bSJan Kiszka         stb_p(buf, value);
1709acc9d80bSJan Kiszka         break;
1710acc9d80bSJan Kiszka     case 2:
1711acc9d80bSJan Kiszka         stw_p(buf, value);
1712acc9d80bSJan Kiszka         break;
1713acc9d80bSJan Kiszka     case 4:
1714acc9d80bSJan Kiszka         stl_p(buf, value);
1715acc9d80bSJan Kiszka         break;
1716acc9d80bSJan Kiszka     default:
1717acc9d80bSJan Kiszka         abort();
1718acc9d80bSJan Kiszka     }
1719acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1720db7b5426Sblueswir1 }
1721db7b5426Sblueswir1 
1722c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1723016e9d62SAmos Kong                             unsigned len, bool is_write)
1724c353e4ccSPaolo Bonzini {
1725acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1726c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1727016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1728acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
1729c353e4ccSPaolo Bonzini #endif
1730c353e4ccSPaolo Bonzini 
1731acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1732016e9d62SAmos Kong                                       len, is_write);
1733c353e4ccSPaolo Bonzini }
1734c353e4ccSPaolo Bonzini 
/* MemoryRegionOps for pages carved into sub-page-sized sections. */
static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1741db7b5426Sblueswir1 
1742c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
17435312bd8bSAvi Kivity                              uint16_t section)
1744db7b5426Sblueswir1 {
1745db7b5426Sblueswir1     int idx, eidx;
1746db7b5426Sblueswir1 
1747db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1748db7b5426Sblueswir1         return -1;
1749db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1750db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1751db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1752016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1753016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
1754db7b5426Sblueswir1 #endif
1755db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
17565312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1757db7b5426Sblueswir1     }
1758db7b5426Sblueswir1 
1759db7b5426Sblueswir1     return 0;
1760db7b5426Sblueswir1 }
1761db7b5426Sblueswir1 
1762acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1763db7b5426Sblueswir1 {
1764c227f099SAnthony Liguori     subpage_t *mmio;
1765db7b5426Sblueswir1 
17667267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
17671eec614bSaliguori 
1768acc9d80bSJan Kiszka     mmio->as = as;
1769db7b5426Sblueswir1     mmio->base = base;
17702c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1771b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
1772b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1773db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1774016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1775016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
1776db7b5426Sblueswir1 #endif
1777b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1778db7b5426Sblueswir1 
1779db7b5426Sblueswir1     return mmio;
1780db7b5426Sblueswir1 }
1781db7b5426Sblueswir1 
1782a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1783a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
17845312bd8bSAvi Kivity {
1785a656e22fSPeter Crosthwaite     assert(as);
17865312bd8bSAvi Kivity     MemoryRegionSection section = {
1787a656e22fSPeter Crosthwaite         .address_space = as,
17885312bd8bSAvi Kivity         .mr = mr,
17895312bd8bSAvi Kivity         .offset_within_address_space = 0,
17905312bd8bSAvi Kivity         .offset_within_region = 0,
1791052e87b0SPaolo Bonzini         .size = int128_2_64(),
17925312bd8bSAvi Kivity     };
17935312bd8bSAvi Kivity 
179453cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
17955312bd8bSAvi Kivity }
17965312bd8bSAvi Kivity 
/*
 * Translate an IOTLB value back to its MemoryRegion: the section index is
 * stored in the sub-page bits of @index, so mask off the page-aligned part
 * and look it up in the current dispatch's section table.
 */
179777717094SEdgar E. Iglesias MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1798aa102231SAvi Kivity {
179977717094SEdgar E. Iglesias     return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1800aa102231SAvi Kivity }
1801aa102231SAvi Kivity 
/*
 * One-time initialisation of the built-in I/O MemoryRegions (ROM writes,
 * unassigned accesses, dirty-tracking writes, and watchpoints) that back
 * the fixed PHYS_SECTION_* dispatch entries.
 */
1802e9179ce1SAvi Kivity static void io_mem_init(void)
1803e9179ce1SAvi Kivity {
18041f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
18052c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
18061f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
18072c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
18081f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
18092c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
18101f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
1811e9179ce1SAvi Kivity }
1812e9179ce1SAvi Kivity 
/*
 * MemoryListener .begin hook: start building a fresh AddressSpaceDispatch
 * for @as.  The four dummy sections must be added in this exact order so
 * their indices match the PHYS_SECTION_* constants (the asserts pin this).
 * The new dispatch is parked in as->next_dispatch until mem_commit().
 */
1813ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1814ac1970fbSAvi Kivity {
181589ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
181653cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
181753cb28cbSMarcel Apfelbaum     uint16_t n;
181853cb28cbSMarcel Apfelbaum 
1819a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
182053cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
1821a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
182253cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
1823a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
182453cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
1825a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
182653cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
182700752703SPaolo Bonzini 
                         /* empty radix tree: everything resolves to unassigned */
18289736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
182900752703SPaolo Bonzini     d->as = as;
183000752703SPaolo Bonzini     as->next_dispatch = d;
183100752703SPaolo Bonzini }
183200752703SPaolo Bonzini 
/*
 * MemoryListener .commit hook: compact the dispatch tree populated during
 * the begin/region_add phase, publish it as the current dispatch, and free
 * the previous one (if any).
 */
183300752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
183400752703SPaolo Bonzini {
183500752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
18360475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
18370475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
1838ac1970fbSAvi Kivity 
183953cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
1839b35ba30fSMichael S. Tsirkin 
18400475d94fSPaolo Bonzini     as->dispatch = next;
184153cb28cbSMarcel Apfelbaum 
184253cb28cbSMarcel Apfelbaum     if (cur) {
184353cb28cbSMarcel Apfelbaum         phys_sections_free(&cur->map);
18440475d94fSPaolo Bonzini         g_free(cur);
1845ac1970fbSAvi Kivity     }
18469affd6fcSPaolo Bonzini }
18489affd6fcSPaolo Bonzini 
/*
 * TCG's .commit hook: after the memory topology changed, flush the TLB of
 * every CPU attached to this listener, because TLB entries cache ram
 * addresses derived from the old topology.
 */
18491d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
185050c1e149SAvi Kivity {
1851182735efSAndreas Färber     CPUState *cpu;
1852117712c3SAvi Kivity 
1853117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
1854117712c3SAvi Kivity        reset the modified entries */
1855117712c3SAvi Kivity     /* XXX: slow ! */
1856bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
185733bde2e1SEdgar E. Iglesias         /* FIXME: Disentangle the cpu.h circular files deps so we can
185833bde2e1SEdgar E. Iglesias            directly get the right CPU from listener.  */
185933bde2e1SEdgar E. Iglesias         if (cpu->tcg_as_listener != listener) {
186033bde2e1SEdgar E. Iglesias             continue;
186133bde2e1SEdgar E. Iglesias         }
186200c8cb0aSAndreas Färber         tlb_flush(cpu, 1);
1863117712c3SAvi Kivity     }
186450c1e149SAvi Kivity }
186550c1e149SAvi Kivity 
/* Listener hook: enable global dirty-memory tracking when any client
 * starts logging. */
186693632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
186793632747SAvi Kivity {
1868981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(true);
186993632747SAvi Kivity }
187093632747SAvi Kivity 
/* Listener hook: disable global dirty-memory tracking when logging stops. */
187193632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
187293632747SAvi Kivity {
1873981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(false);
187493632747SAvi Kivity }
187593632747SAvi Kivity 
/* Core listener: only cares about the global dirty-logging start/stop
 * transitions.  Priority 1 orders it relative to the per-address-space
 * dispatch listeners, which register at priority 0. */
187693632747SAvi Kivity static MemoryListener core_memory_listener = {
187793632747SAvi Kivity     .log_global_start = core_log_global_start,
187893632747SAvi Kivity     .log_global_stop = core_log_global_stop,
1879ac1970fbSAvi Kivity     .priority = 1,
188093632747SAvi Kivity };
188193632747SAvi Kivity 
/*
 * Hook up the dispatch-building MemoryListener for @as.  Registering the
 * listener replays the current memory topology, so as->dispatch is built
 * via mem_begin()/mem_add()/mem_commit() before this returns.
 */
1882ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
1883ac1970fbSAvi Kivity {
188400752703SPaolo Bonzini     as->dispatch = NULL;
188589ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
1886ac1970fbSAvi Kivity         .begin = mem_begin,
188700752703SPaolo Bonzini         .commit = mem_commit,
1888ac1970fbSAvi Kivity         .region_add = mem_add,
1889ac1970fbSAvi Kivity         .region_nop = mem_add,
1890ac1970fbSAvi Kivity         .priority = 0,
1891ac1970fbSAvi Kivity     };
189289ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
1893ac1970fbSAvi Kivity }
1894ac1970fbSAvi Kivity 
/* Tear down the dispatch state for @as: unregister the listener and free
 * the current dispatch table. */
189583f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
189683f3c251SAvi Kivity {
189783f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
189883f3c251SAvi Kivity 
189989ae337aSPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
190083f3c251SAvi Kivity     g_free(d);
190183f3c251SAvi Kivity     as->dispatch = NULL;
190283f3c251SAvi Kivity }
190383f3c251SAvi Kivity 
/*
 * Create the two global address spaces: "memory" backed by the 2^64-byte
 * system_memory container, and "I/O" backed by a 64 KiB region whose
 * unclaimed ports fall through to unassigned_io_ops.
 */
190462152b8aSAvi Kivity static void memory_map_init(void)
190562152b8aSAvi Kivity {
19067267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
190703f49957SPaolo Bonzini 
190857271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
19097dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
1910309cb471SAvi Kivity 
19117267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
19123bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
19133bb28b72SJan Kiszka                           65536);
19147dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
191593632747SAvi Kivity 
1916f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
19172641689aSliguang }
191862152b8aSAvi Kivity 
/* Accessor for the root system memory container created by memory_map_init(). */
191862152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
191962152b8aSAvi Kivity {
192062152b8aSAvi Kivity     return system_memory;
192162152b8aSAvi Kivity }
192362152b8aSAvi Kivity 
/* Accessor for the root I/O-port container created by memory_map_init(). */
1924309cb471SAvi Kivity MemoryRegion *get_system_io(void)
1925309cb471SAvi Kivity {
1926309cb471SAvi Kivity     return system_io;
1927309cb471SAvi Kivity }
1928309cb471SAvi Kivity 
1929e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
1930e2eef170Spbrook 
193113eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
193213eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/*
 * User-mode-only debug accessor (used e.g. by the gdbstub): copy @len bytes
 * between @buf and guest virtual address @addr, one target page at a time.
 * Each page's PAGE_VALID and PAGE_READ/PAGE_WRITE flags are checked, and
 * the host mapping is obtained via lock_user().  Returns 0 on success,
 * -1 on any invalid or inaccessible page.
 */
1933f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1934a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
193513eb76e0Sbellard {
193613eb76e0Sbellard     int l, flags;
193713eb76e0Sbellard     target_ulong page;
193853a5960aSpbrook     void * p;
193913eb76e0Sbellard 
194013eb76e0Sbellard     while (len > 0) {
194113eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
                         /* bytes remaining on this page */
194213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
194313eb76e0Sbellard         if (l > len)
194413eb76e0Sbellard             l = len;
194513eb76e0Sbellard         flags = page_get_flags(page);
194613eb76e0Sbellard         if (!(flags & PAGE_VALID))
1947a68fe89cSPaul Brook             return -1;
194813eb76e0Sbellard         if (is_write) {
194913eb76e0Sbellard             if (!(flags & PAGE_WRITE))
1950a68fe89cSPaul Brook                 return -1;
1951579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
195272fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1953a68fe89cSPaul Brook                 return -1;
195472fb7daaSaurel32             memcpy(p, buf, l);
195572fb7daaSaurel32             unlock_user(p, addr, l);
195613eb76e0Sbellard         } else {
195713eb76e0Sbellard             if (!(flags & PAGE_READ))
1958a68fe89cSPaul Brook                 return -1;
1959579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
196072fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1961a68fe89cSPaul Brook                 return -1;
196272fb7daaSaurel32             memcpy(buf, p, l);
19635b257578Saurel32             unlock_user(p, addr, 0);
196413eb76e0Sbellard         }
196513eb76e0Sbellard         len -= l;
196613eb76e0Sbellard         buf += l;
196713eb76e0Sbellard         addr += l;
196813eb76e0Sbellard     }
1969a68fe89cSPaul Brook     return 0;
197013eb76e0Sbellard }
19718df1cd07Sbellard 
197213eb76e0Sbellard #else
197351d7a9ebSAnthony PERARD 
/*
 * After a direct write to RAM at [addr, addr+length): if the range was
 * clean, drop any translated code for it and mark it dirty in the VGA and
 * migration bitmaps.  Xen is always notified of the modification.
 */
1974a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
1975a8170e5eSAvi Kivity                                      hwaddr length)
197651d7a9ebSAnthony PERARD {
1977a2cd8c85SJuan Quintela     if (cpu_physical_memory_is_clean(addr)) {
197851d7a9ebSAnthony PERARD         /* invalidate code */
197951d7a9ebSAnthony PERARD         tb_invalidate_phys_page_range(addr, addr + length, 0);
198051d7a9ebSAnthony PERARD         /* set dirty bit */
198152159192SJuan Quintela         cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
198252159192SJuan Quintela         cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
198351d7a9ebSAnthony PERARD     }
1984e226939dSAnthony PERARD     xen_modified_memory(addr, length);
198551d7a9ebSAnthony PERARD }
198651d7a9ebSAnthony PERARD 
/*
 * Clamp a requested access of @l bytes at @addr on region @mr to a size
 * the region can actually handle: bounded by the region's max access size
 * (default 4), by the alignment of @addr for regions that do not support
 * unaligned accesses, and finally rounded down to a power of two.
 */
198723326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
198882f2563fSPaolo Bonzini {
1989e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
199023326164SRichard Henderson 
199123326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
199223326164SRichard Henderson        otherwise specified.  */
199323326164SRichard Henderson     if (access_size_max == 0) {
199423326164SRichard Henderson         access_size_max = 4;
199582f2563fSPaolo Bonzini     }
199623326164SRichard Henderson 
199723326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
199823326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
                         /* addr & -addr isolates the lowest set bit, i.e. the
                          * largest power-of-two alignment of addr */
199923326164SRichard Henderson         unsigned align_size_max = addr & -addr;
200023326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
200123326164SRichard Henderson             access_size_max = align_size_max;
200223326164SRichard Henderson         }
200323326164SRichard Henderson     }
200423326164SRichard Henderson 
200523326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
200623326164SRichard Henderson     if (l > access_size_max) {
200723326164SRichard Henderson         l = access_size_max;
200823326164SRichard Henderson     }
                         /* round non-power-of-two sizes down to a power of two */
2009098178f2SPaolo Bonzini     if (l & (l - 1)) {
2010098178f2SPaolo Bonzini         l = 1 << (qemu_fls(l) - 1);
2011098178f2SPaolo Bonzini     }
201223326164SRichard Henderson 
201323326164SRichard Henderson     return l;
201482f2563fSPaolo Bonzini }
201582f2563fSPaolo Bonzini 
/*
 * Copy @len bytes between @buf and address space @as starting at @addr.
 * The range is walked translation-unit by translation-unit: direct-access
 * (RAM/ROMD) regions are serviced with memcpy (marking pages dirty on
 * writes), everything else goes through io_mem_read/io_mem_write in
 * chunks sized by memory_access_size().  Returns true if any MMIO access
 * reported an error, false on full success.
 */
2016fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2017ac1970fbSAvi Kivity                       int len, bool is_write)
201813eb76e0Sbellard {
2019149f54b5SPaolo Bonzini     hwaddr l;
202013eb76e0Sbellard     uint8_t *ptr;
2021791af8c8SPaolo Bonzini     uint64_t val;
2022149f54b5SPaolo Bonzini     hwaddr addr1;
20235c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2024fd8aaa76SPaolo Bonzini     bool error = false;
202513eb76e0Sbellard 
202613eb76e0Sbellard     while (len > 0) {
202713eb76e0Sbellard         l = len;
20285c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
202913eb76e0Sbellard 
203013eb76e0Sbellard         if (is_write) {
20315c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
20325c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
20334917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
20346a00d601Sbellard                    potential bugs */
                          /* byte stream -> host value of width l, then MMIO write */
203523326164SRichard Henderson                 switch (l) {
203623326164SRichard Henderson                 case 8:
203723326164SRichard Henderson                     /* 64 bit write access */
203823326164SRichard Henderson                     val = ldq_p(buf);
203923326164SRichard Henderson                     error |= io_mem_write(mr, addr1, val, 8);
204023326164SRichard Henderson                     break;
204123326164SRichard Henderson                 case 4:
20421c213d19Sbellard                     /* 32 bit write access */
2043c27004ecSbellard                     val = ldl_p(buf);
20445c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
204523326164SRichard Henderson                     break;
204623326164SRichard Henderson                 case 2:
20471c213d19Sbellard                     /* 16 bit write access */
2048c27004ecSbellard                     val = lduw_p(buf);
20495c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
205023326164SRichard Henderson                     break;
205123326164SRichard Henderson                 case 1:
20521c213d19Sbellard                     /* 8 bit write access */
2053c27004ecSbellard                     val = ldub_p(buf);
20545c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
205523326164SRichard Henderson                     break;
205623326164SRichard Henderson                 default:
205723326164SRichard Henderson                     abort();
205813eb76e0Sbellard                 }
20592bbfa05dSPaolo Bonzini             } else {
20605c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
206113eb76e0Sbellard                 /* RAM case */
20625579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
206313eb76e0Sbellard                 memcpy(ptr, buf, l);
206451d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
20653a7d929eSbellard             }
206613eb76e0Sbellard         } else {
20675c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
206813eb76e0Sbellard                 /* I/O case */
20695c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
                          /* MMIO read, then host value -> byte stream of width l */
207023326164SRichard Henderson                 switch (l) {
207123326164SRichard Henderson                 case 8:
207223326164SRichard Henderson                     /* 64 bit read access */
207323326164SRichard Henderson                     error |= io_mem_read(mr, addr1, &val, 8);
207423326164SRichard Henderson                     stq_p(buf, val);
207523326164SRichard Henderson                     break;
207623326164SRichard Henderson                 case 4:
207713eb76e0Sbellard                     /* 32 bit read access */
20785c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
2079c27004ecSbellard                     stl_p(buf, val);
208023326164SRichard Henderson                     break;
208123326164SRichard Henderson                 case 2:
208213eb76e0Sbellard                     /* 16 bit read access */
20835c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
2084c27004ecSbellard                     stw_p(buf, val);
208523326164SRichard Henderson                     break;
208623326164SRichard Henderson                 case 1:
20871c213d19Sbellard                     /* 8 bit read access */
20885c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
2089c27004ecSbellard                     stb_p(buf, val);
209023326164SRichard Henderson                     break;
209123326164SRichard Henderson                 default:
209223326164SRichard Henderson                     abort();
209313eb76e0Sbellard                 }
209413eb76e0Sbellard             } else {
209513eb76e0Sbellard                 /* RAM case */
20965c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2097f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
209813eb76e0Sbellard             }
209913eb76e0Sbellard         }
210013eb76e0Sbellard         len -= l;
210113eb76e0Sbellard         buf += l;
210213eb76e0Sbellard         addr += l;
210313eb76e0Sbellard     }
2104fd8aaa76SPaolo Bonzini 
2105fd8aaa76SPaolo Bonzini     return error;
210613eb76e0Sbellard }
21078df1cd07Sbellard 
/* Convenience wrapper: write @len bytes from @buf into @as at @addr.
 * Returns true on MMIO error (see address_space_rw). */
2108fd8aaa76SPaolo Bonzini bool address_space_write(AddressSpace *as, hwaddr addr,
2109ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
2110ac1970fbSAvi Kivity {
2111fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2112ac1970fbSAvi Kivity }
2113ac1970fbSAvi Kivity 
/* Convenience wrapper: read @len bytes from @as at @addr into @buf.
 * Returns true on MMIO error (see address_space_rw). */
2114fd8aaa76SPaolo Bonzini bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2115ac1970fbSAvi Kivity {
2116fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, buf, len, false);
2117ac1970fbSAvi Kivity }
2118ac1970fbSAvi Kivity 
2119ac1970fbSAvi Kivity 
/* Legacy helper operating on the global system memory address space;
 * any MMIO error indication is discarded. */
2120a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2121ac1970fbSAvi Kivity                             int len, int is_write)
2122ac1970fbSAvi Kivity {
2123fd8aaa76SPaolo Bonzini     address_space_rw(&address_space_memory, addr, buf, len, is_write);
2124ac1970fbSAvi Kivity }
2125ac1970fbSAvi Kivity 
/* Operation selector for cpu_physical_memory_write_rom_internal():
 * either copy data into ROM/RAM or just flush the host icache range. */
2126582b55a9SAlexander Graf enum write_rom_type {
2127582b55a9SAlexander Graf     WRITE_DATA,
2128582b55a9SAlexander Graf     FLUSH_CACHE,
2129582b55a9SAlexander Graf };
2130582b55a9SAlexander Graf 
/*
 * Walk [addr, addr+len) in @as and, for every RAM or ROMD region hit,
 * either copy from @buf (WRITE_DATA, marking pages dirty) or flush the
 * host instruction cache for the range (FLUSH_CACHE, @buf unused).
 * Non-RAM regions are silently skipped.
 */
21312a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2132582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2133d0ecd2aaSbellard {
2134149f54b5SPaolo Bonzini     hwaddr l;
2135d0ecd2aaSbellard     uint8_t *ptr;
2136149f54b5SPaolo Bonzini     hwaddr addr1;
21375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2138d0ecd2aaSbellard 
2139d0ecd2aaSbellard     while (len > 0) {
2140d0ecd2aaSbellard         l = len;
21412a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2142d0ecd2aaSbellard 
21435c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
21445c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2145d0ecd2aaSbellard             /* do nothing */
2146d0ecd2aaSbellard         } else {
21475c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2148d0ecd2aaSbellard             /* ROM/RAM case */
21495579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2150582b55a9SAlexander Graf             switch (type) {
2151582b55a9SAlexander Graf             case WRITE_DATA:
2152d0ecd2aaSbellard                 memcpy(ptr, buf, l);
215351d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
2154582b55a9SAlexander Graf                 break;
2155582b55a9SAlexander Graf             case FLUSH_CACHE:
2156582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2157582b55a9SAlexander Graf                 break;
2158582b55a9SAlexander Graf             }
2159d0ecd2aaSbellard         }
2160d0ecd2aaSbellard         len -= l;
2161d0ecd2aaSbellard         buf += l;
2162d0ecd2aaSbellard         addr += l;
2163d0ecd2aaSbellard     }
2164d0ecd2aaSbellard }
2165d0ecd2aaSbellard 
2166582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
21672a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2168582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2169582b55a9SAlexander Graf {
21702a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2171582b55a9SAlexander Graf }
2172582b55a9SAlexander Graf 
/* Flush the host instruction cache for guest range [start, start+len).
 * A no-op under TCG, which maintains coherency itself. */
2173582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2174582b55a9SAlexander Graf {
2175582b55a9SAlexander Graf     /*
2176582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2177582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2178582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2179582b55a9SAlexander Graf      * the host's instruction cache at least.
2180582b55a9SAlexander Graf      */
2181582b55a9SAlexander Graf     if (tcg_enabled()) {
2182582b55a9SAlexander Graf         return;
2183582b55a9SAlexander Graf     }
2184582b55a9SAlexander Graf 
21852a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
21862a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2187582b55a9SAlexander Graf }
2188582b55a9SAlexander Graf 
/* Fallback buffer for address_space_map() when the target is not direct
 * RAM.  There is a single global instance: bounce.buffer != NULL means it
 * is in use, so only one bounce mapping can exist at a time. */
21896d16c2f8Saliguori typedef struct {
2190d3e71559SPaolo Bonzini     MemoryRegion *mr;
21916d16c2f8Saliguori     void *buffer;
2192a8170e5eSAvi Kivity     hwaddr addr;
2193a8170e5eSAvi Kivity     hwaddr len;
21946d16c2f8Saliguori } BounceBuffer;
21956d16c2f8Saliguori 
21966d16c2f8Saliguori static BounceBuffer bounce;
21976d16c2f8Saliguori 
/* Callers waiting for the bounce buffer to become free register a
 * MapClient; their callbacks fire from cpu_notify_map_clients() when a
 * mapping is released. */
2198ba223c29Saliguori typedef struct MapClient {
2199ba223c29Saliguori     void *opaque;
2200ba223c29Saliguori     void (*callback)(void *opaque);
220172cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2202ba223c29Saliguori } MapClient;
2203ba223c29Saliguori 
220472cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
220572cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2206ba223c29Saliguori 
/*
 * Register @callback(@opaque) to be invoked when retrying a failed
 * address_space_map() is likely to succeed.  Returns an opaque handle
 * (the MapClient itself); the entry is removed automatically after its
 * callback is delivered.
 */
2207ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2208ba223c29Saliguori {
22097267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
2210ba223c29Saliguori 
2211ba223c29Saliguori     client->opaque = opaque;
2212ba223c29Saliguori     client->callback = callback;
221372cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
2214ba223c29Saliguori     return client;
2215ba223c29Saliguori }
2216ba223c29Saliguori 
/* Unlink and free a MapClient handle returned by cpu_register_map_client(). */
22178b9c99d9SBlue Swirl static void cpu_unregister_map_client(void *_client)
2218ba223c29Saliguori {
2219ba223c29Saliguori     MapClient *client = (MapClient *)_client;
2220ba223c29Saliguori 
222172cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
22227267c094SAnthony Liguori     g_free(client);
2223ba223c29Saliguori }
2224ba223c29Saliguori 
/* Fire every registered map-client callback once and drop each entry.
 * Called when the bounce buffer is released. */
2225ba223c29Saliguori static void cpu_notify_map_clients(void)
2226ba223c29Saliguori {
2227ba223c29Saliguori     MapClient *client;
2228ba223c29Saliguori 
222972cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
223072cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2231ba223c29Saliguori         client->callback(client->opaque);
223234d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
2233ba223c29Saliguori     }
2234ba223c29Saliguori }
2235ba223c29Saliguori 
/*
 * Check whether every byte in [addr, addr+len) of @as can be accessed for
 * read (or write if @is_write).  Direct RAM is always valid; MMIO regions
 * are probed via memory_region_access_valid() at the clamped access size.
 */
223651644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
223751644ab7SPaolo Bonzini {
22385c8a00ceSPaolo Bonzini     MemoryRegion *mr;
223951644ab7SPaolo Bonzini     hwaddr l, xlat;
224051644ab7SPaolo Bonzini 
224151644ab7SPaolo Bonzini     while (len > 0) {
224251644ab7SPaolo Bonzini         l = len;
22435c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
22445c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
22455c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
22465c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
224751644ab7SPaolo Bonzini                 return false;
224851644ab7SPaolo Bonzini             }
224951644ab7SPaolo Bonzini         }
225051644ab7SPaolo Bonzini 
225151644ab7SPaolo Bonzini         len -= l;
225251644ab7SPaolo Bonzini         addr += l;
225351644ab7SPaolo Bonzini     }
225451644ab7SPaolo Bonzini     return true;
225551644ab7SPaolo Bonzini }
225651644ab7SPaolo Bonzini 
22576d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
22586d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
22596d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
22606d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2261ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2262ba223c29Saliguori  * likely to succeed.
22636d16c2f8Saliguori  */
2264ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2265a8170e5eSAvi Kivity                         hwaddr addr,
2266a8170e5eSAvi Kivity                         hwaddr *plen,
2267ac1970fbSAvi Kivity                         bool is_write)
22686d16c2f8Saliguori {
2269a8170e5eSAvi Kivity     hwaddr len = *plen;
2270e3127ae0SPaolo Bonzini     hwaddr done = 0;
2271e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2272e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2273e3127ae0SPaolo Bonzini     ram_addr_t raddr;
22746d16c2f8Saliguori 
2275e3127ae0SPaolo Bonzini     if (len == 0) {
2276e3127ae0SPaolo Bonzini         return NULL;
2277e3127ae0SPaolo Bonzini     }
2278e3127ae0SPaolo Bonzini 
22796d16c2f8Saliguori     l = len;
22805c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
                  /* Non-direct (MMIO) target: fall back to the single global
                   * bounce buffer; fail if it is already in use. */
22815c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2282e3127ae0SPaolo Bonzini         if (bounce.buffer) {
2283e3127ae0SPaolo Bonzini             return NULL;
22846d16c2f8Saliguori         }
2285e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2286e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2287e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
22886d16c2f8Saliguori         bounce.addr = addr;
22896d16c2f8Saliguori         bounce.len = l;
2290d3e71559SPaolo Bonzini 
                          /* keep the region alive while the mapping exists */
2291d3e71559SPaolo Bonzini         memory_region_ref(mr);
2292d3e71559SPaolo Bonzini         bounce.mr = mr;
22936d16c2f8Saliguori         if (!is_write) {
2294ac1970fbSAvi Kivity             address_space_read(as, addr, bounce.buffer, l);
22956d16c2f8Saliguori         }
229638bee5dcSStefano Stabellini 
229738bee5dcSStefano Stabellini         *plen = l;
229838bee5dcSStefano Stabellini         return bounce.buffer;
22996d16c2f8Saliguori     }
2300e3127ae0SPaolo Bonzini 
2301e3127ae0SPaolo Bonzini     base = xlat;
2302e3127ae0SPaolo Bonzini     raddr = memory_region_get_ram_addr(mr);
2303e3127ae0SPaolo Bonzini 
                  /* Extend the mapping across consecutive translations as long
                   * as they stay in the same region and are contiguous. */
2304e3127ae0SPaolo Bonzini     for (;;) {
2305e3127ae0SPaolo Bonzini         len -= l;
2306e3127ae0SPaolo Bonzini         addr += l;
2307e3127ae0SPaolo Bonzini         done += l;
2308e3127ae0SPaolo Bonzini         if (len == 0) {
2309e3127ae0SPaolo Bonzini             break;
2310e3127ae0SPaolo Bonzini         }
2311e3127ae0SPaolo Bonzini 
2312e3127ae0SPaolo Bonzini         l = len;
2313e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2314e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2315149f54b5SPaolo Bonzini             break;
2316149f54b5SPaolo Bonzini         }
23178ab934f9SStefano Stabellini     }
23186d16c2f8Saliguori 
2319d3e71559SPaolo Bonzini     memory_region_ref(mr);
2320e3127ae0SPaolo Bonzini     *plen = done;
2321e3127ae0SPaolo Bonzini     return qemu_ram_ptr_length(raddr + base, plen);
23226d16c2f8Saliguori }
23236d16c2f8Saliguori 
2324ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
23256d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
23266d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
23276d16c2f8Saliguori  */
2328a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2329a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
23306d16c2f8Saliguori {
                  /* Direct-RAM mapping: mark written pages dirty and drop the
                   * region reference taken by address_space_map(). */
23316d16c2f8Saliguori     if (buffer != bounce.buffer) {
2332d3e71559SPaolo Bonzini         MemoryRegion *mr;
23337443b437SPaolo Bonzini         ram_addr_t addr1;
2334d3e71559SPaolo Bonzini 
2335d3e71559SPaolo Bonzini         mr = qemu_ram_addr_from_host(buffer, &addr1);
23361b5ec234SPaolo Bonzini         assert(mr != NULL);
2337d3e71559SPaolo Bonzini         if (is_write) {
23386d16c2f8Saliguori             while (access_len) {
23396d16c2f8Saliguori                 unsigned l;
23406d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
23416d16c2f8Saliguori                 if (l > access_len)
23426d16c2f8Saliguori                     l = access_len;
234351d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
23446d16c2f8Saliguori                 addr1 += l;
23456d16c2f8Saliguori                 access_len -= l;
23466d16c2f8Saliguori             }
23476d16c2f8Saliguori         }
2348868bb33fSJan Kiszka         if (xen_enabled()) {
2349e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2350050a0ddfSAnthony PERARD         }
2351d3e71559SPaolo Bonzini         memory_region_unref(mr);
23526d16c2f8Saliguori         return;
23536d16c2f8Saliguori     }
                  /* Bounce-buffer mapping: write back (if needed), release the
                   * buffer, and wake anyone waiting to map. */
23546d16c2f8Saliguori     if (is_write) {
2355ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
23566d16c2f8Saliguori     }
2357f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
23586d16c2f8Saliguori     bounce.buffer = NULL;
2359d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2360ba223c29Saliguori     cpu_notify_map_clients();
23616d16c2f8Saliguori }
2362d0ecd2aaSbellard 
/* Legacy wrapper: address_space_map() on the global system memory space. */
2363a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2364a8170e5eSAvi Kivity                               hwaddr *plen,
2365ac1970fbSAvi Kivity                               int is_write)
2366ac1970fbSAvi Kivity {
2367ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2368ac1970fbSAvi Kivity }
2369ac1970fbSAvi Kivity 
/* Legacy wrapper: address_space_unmap() on the global system memory space. */
2370a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2371a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2372ac1970fbSAvi Kivity {
2373ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2374ac1970fbSAvi Kivity }
2375ac1970fbSAvi Kivity 
/*
 * Load a 32-bit value from @addr in @as with the requested @endian
 * convention.  MMIO reads come back in target endianness and are
 * byte-swapped as needed; direct RAM reads use the endian-specific
 * ldl_*_p accessors.  @addr must be 4-byte aligned (see warning below).
 */
23768df1cd07Sbellard /* warning: addr must be aligned */
2377fdfba1a2SEdgar E. Iglesias static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
23781e78bcc1SAlexander Graf                                          enum device_endian endian)
23798df1cd07Sbellard {
23808df1cd07Sbellard     uint8_t *ptr;
2381791af8c8SPaolo Bonzini     uint64_t val;
23825c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2383149f54b5SPaolo Bonzini     hwaddr l = 4;
2384149f54b5SPaolo Bonzini     hwaddr addr1;
23858df1cd07Sbellard 
2386fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
23875c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
23888df1cd07Sbellard         /* I/O case */
23895c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 4);
23901e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
23911e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
23921e78bcc1SAlexander Graf             val = bswap32(val);
23931e78bcc1SAlexander Graf         }
23941e78bcc1SAlexander Graf #else
23951e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
23961e78bcc1SAlexander Graf             val = bswap32(val);
23971e78bcc1SAlexander Graf         }
23981e78bcc1SAlexander Graf #endif
23998df1cd07Sbellard     } else {
24008df1cd07Sbellard         /* RAM case */
24015c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
240206ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2403149f54b5SPaolo Bonzini                                + addr1);
24041e78bcc1SAlexander Graf         switch (endian) {
24051e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24061e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
24071e78bcc1SAlexander Graf             break;
24081e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24091e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
24101e78bcc1SAlexander Graf             break;
24111e78bcc1SAlexander Graf         default:
                         /* DEVICE_NATIVE_ENDIAN: use the target's natural order */
24128df1cd07Sbellard             val = ldl_p(ptr);
24131e78bcc1SAlexander Graf             break;
24141e78bcc1SAlexander Graf         }
24158df1cd07Sbellard     }
24168df1cd07Sbellard     return val;
24178df1cd07Sbellard }
24188df1cd07Sbellard 
/* Load a 32-bit value from @addr in the guest's native byte order. */
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 32-bit little-endian value from @addr. */
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 32-bit big-endian value from @addr. */
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
24331e78bcc1SAlexander Graf 
243484b7b8e7Sbellard /* warning: addr must be aligned */
24352c17449bSEdgar E. Iglesias static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
24361e78bcc1SAlexander Graf                                          enum device_endian endian)
243784b7b8e7Sbellard {
243884b7b8e7Sbellard     uint8_t *ptr;
243984b7b8e7Sbellard     uint64_t val;
24405c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2441149f54b5SPaolo Bonzini     hwaddr l = 8;
2442149f54b5SPaolo Bonzini     hwaddr addr1;
244384b7b8e7Sbellard 
24442c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2445149f54b5SPaolo Bonzini                                  false);
24465c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
244784b7b8e7Sbellard         /* I/O case */
24485c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 8);
2449968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
2450968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
2451968a5627SPaolo Bonzini             val = bswap64(val);
2452968a5627SPaolo Bonzini         }
2453968a5627SPaolo Bonzini #else
2454968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
2455968a5627SPaolo Bonzini             val = bswap64(val);
2456968a5627SPaolo Bonzini         }
2457968a5627SPaolo Bonzini #endif
245884b7b8e7Sbellard     } else {
245984b7b8e7Sbellard         /* RAM case */
24605c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
246106ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2462149f54b5SPaolo Bonzini                                + addr1);
24631e78bcc1SAlexander Graf         switch (endian) {
24641e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24651e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
24661e78bcc1SAlexander Graf             break;
24671e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24681e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
24691e78bcc1SAlexander Graf             break;
24701e78bcc1SAlexander Graf         default:
247184b7b8e7Sbellard             val = ldq_p(ptr);
24721e78bcc1SAlexander Graf             break;
24731e78bcc1SAlexander Graf         }
247484b7b8e7Sbellard     }
247584b7b8e7Sbellard     return val;
247684b7b8e7Sbellard }
247784b7b8e7Sbellard 
/* Load a 64-bit value from @addr in the guest's native byte order. */
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 64-bit little-endian value from @addr. */
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 64-bit big-endian value from @addr. */
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
24921e78bcc1SAlexander Graf 
/* XXX: optimize */
/* Load a single byte from @addr, zero-extended to 32 bits.
 * Byte accesses have no endianness, so this goes straight through
 * address_space_rw(). */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}
2500aab33094Sbellard 
2501733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
250241701aa4SEdgar E. Iglesias static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
25031e78bcc1SAlexander Graf                                           enum device_endian endian)
2504aab33094Sbellard {
2505733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2506733f0b02SMichael S. Tsirkin     uint64_t val;
25075c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2508149f54b5SPaolo Bonzini     hwaddr l = 2;
2509149f54b5SPaolo Bonzini     hwaddr addr1;
2510733f0b02SMichael S. Tsirkin 
251141701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2512149f54b5SPaolo Bonzini                                  false);
25135c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2514733f0b02SMichael S. Tsirkin         /* I/O case */
25155c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
25161e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
25171e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
25181e78bcc1SAlexander Graf             val = bswap16(val);
25191e78bcc1SAlexander Graf         }
25201e78bcc1SAlexander Graf #else
25211e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
25221e78bcc1SAlexander Graf             val = bswap16(val);
25231e78bcc1SAlexander Graf         }
25241e78bcc1SAlexander Graf #endif
2525733f0b02SMichael S. Tsirkin     } else {
2526733f0b02SMichael S. Tsirkin         /* RAM case */
25275c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
252806ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2529149f54b5SPaolo Bonzini                                + addr1);
25301e78bcc1SAlexander Graf         switch (endian) {
25311e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25321e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
25331e78bcc1SAlexander Graf             break;
25341e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25351e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
25361e78bcc1SAlexander Graf             break;
25371e78bcc1SAlexander Graf         default:
2538733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
25391e78bcc1SAlexander Graf             break;
25401e78bcc1SAlexander Graf         }
2541733f0b02SMichael S. Tsirkin     }
2542733f0b02SMichael S. Tsirkin     return val;
2543aab33094Sbellard }
2544aab33094Sbellard 
/* Load a 16-bit value from @addr in the guest's native byte order. */
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 16-bit little-endian value from @addr. */
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 16-bit big-endian value from @addr. */
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
25591e78bcc1SAlexander Graf 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;            /* bytes requested from the translation */
    hwaddr addr1;            /* offset of addr within mr */

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* I/O (or partially mapped) case: go through the device model.
         * Note: no endian fixup here; the value is written in the
         * target-native order only. */
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case: store directly, in target-native order (stl_p). */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the page must still be marked dirty so it is
         * re-sent, and any TBs over it must be invalidated; otherwise the
         * "notdirty" fast path above is intentionally left unbooked. */
        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
25918df1cd07Sbellard 
25928df1cd07Sbellard /* warning: addr must be aligned */
2593ab1da857SEdgar E. Iglesias static inline void stl_phys_internal(AddressSpace *as,
2594ab1da857SEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
25951e78bcc1SAlexander Graf                                      enum device_endian endian)
25968df1cd07Sbellard {
25978df1cd07Sbellard     uint8_t *ptr;
25985c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2599149f54b5SPaolo Bonzini     hwaddr l = 4;
2600149f54b5SPaolo Bonzini     hwaddr addr1;
26018df1cd07Sbellard 
2602ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2603149f54b5SPaolo Bonzini                                  true);
26045c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
26051e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26061e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26071e78bcc1SAlexander Graf             val = bswap32(val);
26081e78bcc1SAlexander Graf         }
26091e78bcc1SAlexander Graf #else
26101e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26111e78bcc1SAlexander Graf             val = bswap32(val);
26121e78bcc1SAlexander Graf         }
26131e78bcc1SAlexander Graf #endif
26145c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
26158df1cd07Sbellard     } else {
26168df1cd07Sbellard         /* RAM case */
26175c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
26185579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
26191e78bcc1SAlexander Graf         switch (endian) {
26201e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26211e78bcc1SAlexander Graf             stl_le_p(ptr, val);
26221e78bcc1SAlexander Graf             break;
26231e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26241e78bcc1SAlexander Graf             stl_be_p(ptr, val);
26251e78bcc1SAlexander Graf             break;
26261e78bcc1SAlexander Graf         default:
26278df1cd07Sbellard             stl_p(ptr, val);
26281e78bcc1SAlexander Graf             break;
26291e78bcc1SAlexander Graf         }
263051d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
26318df1cd07Sbellard     }
26323a7d929eSbellard }
26338df1cd07Sbellard 
/* Store a 32-bit value at @addr in the guest's native byte order. */
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

/* Store a 32-bit value at @addr as little-endian. */
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

/* Store a 32-bit value at @addr as big-endian. */
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
26481e78bcc1SAlexander Graf 
/* XXX: optimize */
/* Store the low byte of @val at @addr.  Byte accesses have no
 * endianness, so this goes straight through address_space_rw(). */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}
2655aab33094Sbellard 
2656733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
26575ce5944dSEdgar E. Iglesias static inline void stw_phys_internal(AddressSpace *as,
26585ce5944dSEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
26591e78bcc1SAlexander Graf                                      enum device_endian endian)
2660aab33094Sbellard {
2661733f0b02SMichael S. Tsirkin     uint8_t *ptr;
26625c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2663149f54b5SPaolo Bonzini     hwaddr l = 2;
2664149f54b5SPaolo Bonzini     hwaddr addr1;
2665733f0b02SMichael S. Tsirkin 
26665ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
26675c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
26681e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26691e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26701e78bcc1SAlexander Graf             val = bswap16(val);
26711e78bcc1SAlexander Graf         }
26721e78bcc1SAlexander Graf #else
26731e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26741e78bcc1SAlexander Graf             val = bswap16(val);
26751e78bcc1SAlexander Graf         }
26761e78bcc1SAlexander Graf #endif
26775c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2678733f0b02SMichael S. Tsirkin     } else {
2679733f0b02SMichael S. Tsirkin         /* RAM case */
26805c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2681733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
26821e78bcc1SAlexander Graf         switch (endian) {
26831e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26841e78bcc1SAlexander Graf             stw_le_p(ptr, val);
26851e78bcc1SAlexander Graf             break;
26861e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26871e78bcc1SAlexander Graf             stw_be_p(ptr, val);
26881e78bcc1SAlexander Graf             break;
26891e78bcc1SAlexander Graf         default:
2690733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
26911e78bcc1SAlexander Graf             break;
26921e78bcc1SAlexander Graf         }
269351d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2694733f0b02SMichael S. Tsirkin     }
2695aab33094Sbellard }
2696aab33094Sbellard 
/* Store a 16-bit value at @addr in the guest's native byte order. */
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

/* Store a 16-bit value at @addr as little-endian. */
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

/* Store a 16-bit value at @addr as big-endian. */
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
27111e78bcc1SAlexander Graf 
/* XXX: optimize */
/* Store a 64-bit value at @addr in the guest's native byte order
 * (tswap64 converts from host to target order before the write). */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

/* Store a 64-bit value at @addr as little-endian. */
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

/* Store a 64-bit value at @addr as big-endian. */
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
27301e78bcc1SAlexander Graf 
27315e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
2732f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2733b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
273413eb76e0Sbellard {
273513eb76e0Sbellard     int l;
2736a8170e5eSAvi Kivity     hwaddr phys_addr;
27379b3c35e0Sj_mayer     target_ulong page;
273813eb76e0Sbellard 
273913eb76e0Sbellard     while (len > 0) {
274013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
2741f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
274213eb76e0Sbellard         /* if no physical page mapped, return an error */
274313eb76e0Sbellard         if (phys_addr == -1)
274413eb76e0Sbellard             return -1;
274513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
274613eb76e0Sbellard         if (l > len)
274713eb76e0Sbellard             l = len;
27485e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
27492e38847bSEdgar E. Iglesias         if (is_write) {
27502e38847bSEdgar E. Iglesias             cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
27512e38847bSEdgar E. Iglesias         } else {
27522e38847bSEdgar E. Iglesias             address_space_rw(cpu->as, phys_addr, buf, l, 0);
27532e38847bSEdgar E. Iglesias         }
275413eb76e0Sbellard         len -= l;
275513eb76e0Sbellard         buf += l;
275613eb76e0Sbellard         addr += l;
275713eb76e0Sbellard     }
275813eb76e0Sbellard     return 0;
275913eb76e0Sbellard }
2760a68fe89cSPaul Brook #endif
276113eb76e0Sbellard 
27628e4a424bSBlue Swirl /*
27638e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find out if
27648e4a424bSBlue Swirl  * it's running on a big endian machine. Don't do this at home kids!
27658e4a424bSBlue Swirl  */
/* Prototype given here (not in a header) so the definition below has one. */
bool target_words_bigendian(void);
/* Returns true iff this binary was built for a big-endian target
 * (compile-time TARGET_WORDS_BIGENDIAN). */
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
27758e4a424bSBlue Swirl 
277676f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2777a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
277876f35538SWen Congyang {
27795c8a00ceSPaolo Bonzini     MemoryRegion*mr;
2780149f54b5SPaolo Bonzini     hwaddr l = 1;
278176f35538SWen Congyang 
27825c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2783149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
278476f35538SWen Congyang 
27855c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
27865c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
278776f35538SWen Congyang }
2788bd2fa51fSMichael R. Hines 
/* Invoke @func once for every RAM block in ram_list, passing the block's
 * host address, guest offset, length and the caller-supplied @opaque. */
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
2797ec3f8c99SPeter Maydell #endif
2798