xref: /qemu/system/physmem.c (revision 6c3bff0ed8a40921464b9a07aa0fe079e860c978)
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
       0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
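
/* Worked example, illustrative numbers only: with ADDR_SPACE_BITS = 64,
 * P_L2_BITS = 9 and a typical TARGET_PAGE_BITS of 12 (4 KiB target pages;
 * the value is target-dependent), P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6,
 * and each level consumes 9 bits of the page index (6 * 9 = 54 bits, enough
 * for the 52 index bits that remain).
 */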

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
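
/* SUBPAGE_IDX() keeps only the offset inside one target page; with
 * hypothetical 4 KiB pages, SUBPAGE_IDX(0x1234) == 0x234.  sub_section[]
 * thus maps every byte offset within the page to a phys_sections index.
 */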

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
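
/* Usage sketch, hypothetical values: registering a 2 MiB region at
 * physical address 0x40000000 with 4 KiB target pages amounts to
 *
 *     phys_page_set(d, 0x40000000 >> TARGET_PAGE_BITS, 512, leaf);
 *
 * i.e. 512 consecutive page indexes all resolving to one phys_sections
 * entry.  Runs covering a whole aligned subtree are collapsed into a
 * single entry by the "(*index & (step - 1)) == 0 && *nb >= step" fast
 * path above.
 */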

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
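
/* Effect of compaction, hypothetical shape: if a lookup previously walked
 * three single-child interior nodes before reaching a leaf, the surviving
 * entry ends up with skip == 3, so phys_page_find() consumes
 * 3 * P_L2_BITS index bits in one step instead of three pointer chases.
 */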

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
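
/* Lookup sketch, assuming hypothetical 4 KiB target pages: for
 * addr == 0x40001000 the page index is 0x40001, and each loop iteration
 * peels off lp.skip * P_L2_BITS bits of that index, most significant
 * first.  The final range_covers_byte() check guards against a compacted
 * leaf that spans more index bits than the section it points to covers.
 */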

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
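
/* Usage sketch for the translate API; illustrative only and deliberately
 * compiled out.  The address and length below are hypothetical.
 */
#if 0
static void address_space_translate_example(void)
{
    hwaddr xlat, plen = 4;
    MemoryRegion *mr;

    mr = address_space_translate(&address_space_memory, 0x1000,
                                 &xlat, &plen, false);
    /* plen can come back smaller than requested when the range crosses a
     * section or IOMMU page boundary, so real callers must loop.
     */
    if (memory_access_is_direct(mr, false)) {
        /* RAM or ROMD read: the backing host memory can be accessed
         * directly at offset xlat within mr.
         */
    }
}
#endif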

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        }, {
            /* empty */
        }
    }
};
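
/* The exception_index subsection is only put on the wire when
 * cpu_common_exception_index_needed() returns true, i.e. when the field
 * differs from the default that cpu_common_pre_load() restores; streams
 * from older QEMU versions that lack the subsection therefore still load.
 */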

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
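
/* Usage sketch; the address and length are hypothetical and the block is
 * deliberately compiled out.
 */
#if 0
static void watchpoint_example(CPUState *cpu)
{
    CPUWatchpoint *wp;

    /* A 4-byte write watchpoint: len must be a power of two no larger than
     * TARGET_PAGE_SIZE and addr must be aligned to it, or -EINVAL results.
     */
    if (cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE, &wp) == 0) {
        cpu_watchpoint_remove_by_ref(cpu, wp);
    }
}
#endif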

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
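
/* Most lookups are expected to hit the MRU cache above: a guest that keeps
 * touching the same RAM block (the common case) avoids the O(n) list walk.
 */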

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
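
/* Encoding sketch, illustrative values: for dirty-tracked RAM whose
 * ram_addr is 0x12340000, the returned iotlb is
 * 0x12340000 | PHYS_SECTION_NOTDIRTY.  The section number is ORed into the
 * low bits of a page-aligned value, which is why phys_section_add() below
 * asserts that sections_nb stays under TARGET_PAGE_SIZE.
 */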
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
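
/* Worked example, hypothetical layout with 4 KiB pages: a section covering
 * [0x1800, 0x4800) is registered in three pieces:
 *   [0x1800, 0x2000)  head subpage (unaligned start),
 *   [0x2000, 0x4000)  one multipage run of whole pages,
 *   [0x4000, 0x4800)  tail subpage (partial final page).
 */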

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
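
/* On a hugetlbfs mount, f_bsize reports the mount's huge page size
 * (e.g. 2 MiB or 1 GiB on x86 hosts); on any other filesystem it is just
 * the ordinary block size, hence the warning above.
 */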

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
10868ca761f6SPeter Feiner     }
10878ca761f6SPeter Feiner 
10888ca761f6SPeter Feiner     filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
10898ca761f6SPeter Feiner                                sanitized_name);
10908ca761f6SPeter Feiner     g_free(sanitized_name);
1091c902760fSMarcelo Tosatti 
1092c902760fSMarcelo Tosatti     fd = mkstemp(filename);
1093c902760fSMarcelo Tosatti     if (fd < 0) {
10947f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
10957f56e740SPaolo Bonzini                          "unable to create backing store for hugepages");
1096e4ada482SStefan Weil         g_free(filename);
1097f9a49dfaSMarcelo Tosatti         goto error;
1098c902760fSMarcelo Tosatti     }
1099c902760fSMarcelo Tosatti     unlink(filename);
1100e4ada482SStefan Weil     g_free(filename);
1101c902760fSMarcelo Tosatti 
1102c902760fSMarcelo Tosatti     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
1103c902760fSMarcelo Tosatti 
1104c902760fSMarcelo Tosatti     /*
1105c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs on older
1106c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1107c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1108c902760fSMarcelo Tosatti      * mmap will fail.
1109c902760fSMarcelo Tosatti      */
11107f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1111c902760fSMarcelo Tosatti         perror("ftruncate");
11127f56e740SPaolo Bonzini     }
1113c902760fSMarcelo Tosatti 
1114dbcb8981SPaolo Bonzini     area = mmap(0, memory, PROT_READ | PROT_WRITE,
1115dbcb8981SPaolo Bonzini                 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1116dbcb8981SPaolo Bonzini                 fd, 0);
1117c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
11187f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
11197f56e740SPaolo Bonzini                          "unable to map backing store for hugepages");
1120c902760fSMarcelo Tosatti         close(fd);
1121f9a49dfaSMarcelo Tosatti         goto error;
1122c902760fSMarcelo Tosatti     }
1123ef36fa14SMarcelo Tosatti 
1124ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
112538183310SPaolo Bonzini         os_mem_prealloc(fd, area, memory);
1126ef36fa14SMarcelo Tosatti     }
1127ef36fa14SMarcelo Tosatti 
112804b16653SAlex Williamson     block->fd = fd;
1129c902760fSMarcelo Tosatti     return area;
1130f9a49dfaSMarcelo Tosatti 
1131f9a49dfaSMarcelo Tosatti error:
1132f9a49dfaSMarcelo Tosatti     if (mem_prealloc) {
1133f9a49dfaSMarcelo Tosatti         exit(1);
1134f9a49dfaSMarcelo Tosatti     }
1135f9a49dfaSMarcelo Tosatti     return NULL;
1136c902760fSMarcelo Tosatti }
1137c902760fSMarcelo Tosatti #endif
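
/*
 * Stripped of error handling, the hugetlbfs allocation above is:
 *
 *     fd = mkstemp(filename);             create the backing file
 *     unlink(filename);                   keep it anonymous on disk
 *     ftruncate(fd, memory);              may fail on old hugetlbfs
 *     area = mmap(0, memory, PROT_READ | PROT_WRITE,
 *                 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
 *                 fd, 0);
 *
 * so the huge pages stay reserved while the mapping or the descriptor
 * lives, and vanish once both are gone.
 */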
1138c902760fSMarcelo Tosatti 
1139d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1140d17b5288SAlex Williamson {
114104b16653SAlex Williamson     RAMBlock *block, *next_block;
11423e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
114304b16653SAlex Williamson 
114649cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out the same offset multiple times */
114549cd9ac6SStefan Hajnoczi 
1146a3161038SPaolo Bonzini     if (QTAILQ_EMPTY(&ram_list.blocks))
114704b16653SAlex Williamson         return 0;
114804b16653SAlex Williamson 
1149a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1150f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
115104b16653SAlex Williamson 
115204b16653SAlex Williamson         end = block->offset + block->length;
115304b16653SAlex Williamson 
1154a3161038SPaolo Bonzini         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
115504b16653SAlex Williamson             if (next_block->offset >= end) {
115604b16653SAlex Williamson                 next = MIN(next, next_block->offset);
115704b16653SAlex Williamson             }
115804b16653SAlex Williamson         }
115904b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
116004b16653SAlex Williamson             offset = end;
116104b16653SAlex Williamson             mingap = next - end;
116204b16653SAlex Williamson         }
116304b16653SAlex Williamson     }
11643e837b2cSAlex Williamson 
11653e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
11663e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
11673e837b2cSAlex Williamson                 (uint64_t)size);
11683e837b2cSAlex Williamson         abort();
11693e837b2cSAlex Williamson     }
11703e837b2cSAlex Williamson 
117104b16653SAlex Williamson     return offset;
117204b16653SAlex Williamson }
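
/*
 * find_ram_offset() is a best-fit search: for each block it measures
 * the gap up to the next-higher block and keeps the smallest gap that
 * still fits.  Hypothetical example: with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x800 bytes finds one gap of 0x2000
 * at offset 0x1000 and allocates there; had a smaller adequate gap
 * existed, mingap would have preferred it, limiting fragmentation.
 */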
117304b16653SAlex Williamson 
1174652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
117504b16653SAlex Williamson {
1176d17b5288SAlex Williamson     RAMBlock *block;
1177d17b5288SAlex Williamson     ram_addr_t last = 0;
1178d17b5288SAlex Williamson 
1179a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1180d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
1181d17b5288SAlex Williamson 
1182d17b5288SAlex Williamson     return last;
1183d17b5288SAlex Williamson }
1184d17b5288SAlex Williamson 
1185ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1186ddb97f1dSJason Baron {
1187ddb97f1dSJason Baron     int ret;
1188ddb97f1dSJason Baron 
1189ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
11902ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
11912ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1192ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1193ddb97f1dSJason Baron         if (ret) {
1194ddb97f1dSJason Baron             perror("qemu_madvise");
1195ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1196ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1197ddb97f1dSJason Baron         }
1198ddb97f1dSJason Baron     }
1199ddb97f1dSJason Baron }
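
/*
 * The option read above is the machine property dump-guest-core, e.g.
 *
 *     qemu-system-x86_64 -machine pc,dump-guest-core=off ...
 *
 * which applies QEMU_MADV_DONTDUMP so guest RAM is left out of host
 * core dumps.
 */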
1200ddb97f1dSJason Baron 
120120cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
120284b89d78SCam Macdonell {
120320cfe881SHu Tao     RAMBlock *block;
120484b89d78SCam Macdonell 
1205a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1206c5705a77SAvi Kivity         if (block->offset == addr) {
120720cfe881SHu Tao             return block;
1208c5705a77SAvi Kivity         }
1209c5705a77SAvi Kivity     }
121020cfe881SHu Tao 
121120cfe881SHu Tao     return NULL;
121220cfe881SHu Tao }
121320cfe881SHu Tao 
121420cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
121520cfe881SHu Tao {
121620cfe881SHu Tao     RAMBlock *new_block = find_ram_block(addr);
121720cfe881SHu Tao     RAMBlock *block;
121820cfe881SHu Tao 
1219c5705a77SAvi Kivity     assert(new_block);
1220c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
122184b89d78SCam Macdonell 
122209e5ab63SAnthony Liguori     if (dev) {
122309e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
122484b89d78SCam Macdonell         if (id) {
122584b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
12267267c094SAnthony Liguori             g_free(id);
122784b89d78SCam Macdonell         }
122884b89d78SCam Macdonell     }
122984b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
123084b89d78SCam Macdonell 
1231b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1232b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1233a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1234c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
123584b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
123684b89d78SCam Macdonell                     new_block->idstr);
123784b89d78SCam Macdonell             abort();
123884b89d78SCam Macdonell         }
123984b89d78SCam Macdonell     }
1240b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1241c5705a77SAvi Kivity }
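
/*
 * The resulting idstr is "<qdev path>/<name>" when the device has a
 * path, or just "<name>" otherwise; for a PCI VGA card's video RAM it
 * might look like "0000:00:02.0/vga.vram" (illustrative values, not
 * taken from this file).  Migration identifies RAM blocks by these
 * strings, which is why a duplicate aborts.
 */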
1242c5705a77SAvi Kivity 
124320cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
124420cfe881SHu Tao {
124520cfe881SHu Tao     RAMBlock *block = find_ram_block(addr);
124620cfe881SHu Tao 
124720cfe881SHu Tao     if (block) {
124820cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
124920cfe881SHu Tao     }
125020cfe881SHu Tao }
125120cfe881SHu Tao 
12528490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
12538490fc78SLuiz Capitulino {
12542ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
12558490fc78SLuiz Capitulino         /* disabled by the user */
12568490fc78SLuiz Capitulino         return 0;
12578490fc78SLuiz Capitulino     }
12588490fc78SLuiz Capitulino 
12598490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
12608490fc78SLuiz Capitulino }
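
/*
 * QEMU_MADV_MERGEABLE opts the range into KSM (kernel samepage
 * merging) on Linux hosts.  The guard above corresponds to the
 * machine property mem-merge, e.g.:
 *
 *     qemu-system-x86_64 -machine pc,mem-merge=off ...
 */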
12618490fc78SLuiz Capitulino 
1262e1c57ab8SPaolo Bonzini static ram_addr_t ram_block_add(RAMBlock *new_block)
1263c5705a77SAvi Kivity {
1264e1c57ab8SPaolo Bonzini     RAMBlock *block;
12652152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
12662152f5caSJuan Quintela 
12672152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1268c5705a77SAvi Kivity 
1269b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1270b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1271e1c57ab8SPaolo Bonzini     new_block->offset = find_ram_offset(new_block->length);
1272e1c57ab8SPaolo Bonzini 
12730628c182SMarkus Armbruster     if (!new_block->host) {
1274e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
1275e1c57ab8SPaolo Bonzini             xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1276e1c57ab8SPaolo Bonzini         } else {
1277e1c57ab8SPaolo Bonzini             new_block->host = phys_mem_alloc(new_block->length);
127839228250SMarkus Armbruster             if (!new_block->host) {
127939228250SMarkus Armbruster                 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
128083234bf2SPeter Crosthwaite                         memory_region_name(new_block->mr), strerror(errno));
128139228250SMarkus Armbruster                 exit(1);
128239228250SMarkus Armbruster             }
1283e1c57ab8SPaolo Bonzini             memory_try_enable_merging(new_block->host, new_block->length);
1284c902760fSMarcelo Tosatti         }
12856977dfe6SYoshiaki Tamura     }
128694a6b54fSpbrook 
1287abb26d63SPaolo Bonzini     /* Keep the list sorted from biggest to smallest block.  */
1288abb26d63SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1289abb26d63SPaolo Bonzini         if (block->length < new_block->length) {
1290abb26d63SPaolo Bonzini             break;
1291abb26d63SPaolo Bonzini         }
1292abb26d63SPaolo Bonzini     }
1293abb26d63SPaolo Bonzini     if (block) {
1294abb26d63SPaolo Bonzini         QTAILQ_INSERT_BEFORE(block, new_block, next);
1295abb26d63SPaolo Bonzini     } else {
1296abb26d63SPaolo Bonzini         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1297abb26d63SPaolo Bonzini     }
12980d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
129994a6b54fSpbrook 
1300f798b07fSUmesh Deshpande     ram_list.version++;
1301b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1302f798b07fSUmesh Deshpande 
13032152f5caSJuan Quintela     new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
13042152f5caSJuan Quintela 
13052152f5caSJuan Quintela     if (new_ram_size > old_ram_size) {
13061ab4c8ceSJuan Quintela         int i;
13071ab4c8ceSJuan Quintela         for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
13081ab4c8ceSJuan Quintela             ram_list.dirty_memory[i] =
13091ab4c8ceSJuan Quintela                 bitmap_zero_extend(ram_list.dirty_memory[i],
13101ab4c8ceSJuan Quintela                                    old_ram_size, new_ram_size);
13111ab4c8ceSJuan Quintela        }
13122152f5caSJuan Quintela     }
1313e1c57ab8SPaolo Bonzini     cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
131494a6b54fSpbrook 
1315e1c57ab8SPaolo Bonzini     qemu_ram_setup_dump(new_block->host, new_block->length);
1316e1c57ab8SPaolo Bonzini     qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1317e1c57ab8SPaolo Bonzini     qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
1318ddb97f1dSJason Baron 
1319e1c57ab8SPaolo Bonzini     if (kvm_enabled()) {
1320e1c57ab8SPaolo Bonzini         kvm_setup_guest_memory(new_block->host, new_block->length);
1321e1c57ab8SPaolo Bonzini     }
13226f0437e8SJan Kiszka 
132394a6b54fSpbrook     return new_block->offset;
132494a6b54fSpbrook }
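
/*
 * The dirty-bitmap resize above is in units of target pages: assuming
 * 4 KiB target pages, growing RAM by 1 GiB extends each of the
 * DIRTY_MEMORY_NUM bitmaps by 1 GiB / 4 KiB = 262144 bits (32 KiB)
 * via bitmap_zero_extend(), after which the new block's range is
 * marked dirty.
 */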
1325e9a1ab19Sbellard 
13260b183fc8SPaolo Bonzini #ifdef __linux__
1327e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1328dbcb8981SPaolo Bonzini                                     bool share, const char *mem_path,
13297f56e740SPaolo Bonzini                                     Error **errp)
1330e1c57ab8SPaolo Bonzini {
1331e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1332e1c57ab8SPaolo Bonzini 
1333e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
13347f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
13357f56e740SPaolo Bonzini         return -1;
1336e1c57ab8SPaolo Bonzini     }
1337e1c57ab8SPaolo Bonzini 
1338e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1339e1c57ab8SPaolo Bonzini         /*
1340e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1341e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1342e1c57ab8SPaolo Bonzini          * a hook there.
1343e1c57ab8SPaolo Bonzini          */
13447f56e740SPaolo Bonzini         error_setg(errp,
13457f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
13467f56e740SPaolo Bonzini         return -1;
1347e1c57ab8SPaolo Bonzini     }
1348e1c57ab8SPaolo Bonzini 
1349e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1350e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1351e1c57ab8SPaolo Bonzini     new_block->mr = mr;
1352e1c57ab8SPaolo Bonzini     new_block->length = size;
1353dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
13547f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
13557f56e740SPaolo Bonzini                                      mem_path, errp);
13567f56e740SPaolo Bonzini     if (!new_block->host) {
13577f56e740SPaolo Bonzini         g_free(new_block);
13587f56e740SPaolo Bonzini         return -1;
13597f56e740SPaolo Bonzini     }
13607f56e740SPaolo Bonzini 
1361e1c57ab8SPaolo Bonzini     return ram_block_add(new_block);
1362e1c57ab8SPaolo Bonzini }
13630b183fc8SPaolo Bonzini #endif
1364e1c57ab8SPaolo Bonzini 
1365e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1366e1c57ab8SPaolo Bonzini                                    MemoryRegion *mr)
1367e1c57ab8SPaolo Bonzini {
1368e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1369e1c57ab8SPaolo Bonzini 
1370e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1371e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1372e1c57ab8SPaolo Bonzini     new_block->mr = mr;
1373e1c57ab8SPaolo Bonzini     new_block->length = size;
1374e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1375e1c57ab8SPaolo Bonzini     new_block->host = host;
1376e1c57ab8SPaolo Bonzini     if (host) {
13777bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1378e1c57ab8SPaolo Bonzini     }
1379e1c57ab8SPaolo Bonzini     return ram_block_add(new_block);
1380e1c57ab8SPaolo Bonzini }
1381e1c57ab8SPaolo Bonzini 
1382c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
13836977dfe6SYoshiaki Tamura {
1384c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
13856977dfe6SYoshiaki Tamura }
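
/*
 * Device code normally reaches these allocators through the
 * MemoryRegion API instead of calling them directly; a minimal sketch
 * (field and size names are illustrative):
 *
 *     memory_region_init_ram(&s->vram, OBJECT(dev), "vga.vram",
 *                            vram_size);
 *     ptr = memory_region_get_ram_ptr(&s->vram);
 */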
13866977dfe6SYoshiaki Tamura 
13871f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
13881f2e98b6SAlex Williamson {
13891f2e98b6SAlex Williamson     RAMBlock *block;
13901f2e98b6SAlex Williamson 
1391b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1392b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1393a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
13941f2e98b6SAlex Williamson         if (addr == block->offset) {
1395a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13960d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1397f798b07fSUmesh Deshpande             ram_list.version++;
13987267c094SAnthony Liguori             g_free(block);
1399b2a8658eSUmesh Deshpande             break;
14001f2e98b6SAlex Williamson         }
14011f2e98b6SAlex Williamson     }
1402b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
14031f2e98b6SAlex Williamson }
14041f2e98b6SAlex Williamson 
1405c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
1406e9a1ab19Sbellard {
140704b16653SAlex Williamson     RAMBlock *block;
140804b16653SAlex Williamson 
1409b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1410b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1411a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
141204b16653SAlex Williamson         if (addr == block->offset) {
1413a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
14140d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1415f798b07fSUmesh Deshpande             ram_list.version++;
14167bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1417cd19cfa2SHuang Ying                 ;
1418dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1419dfeaf2abSMarkus Armbruster                 xen_invalidate_map_cache_entry(block->host);
1420089f3f76SStefan Weil #ifndef _WIN32
14213435f395SMarkus Armbruster             } else if (block->fd >= 0) {
142204b16653SAlex Williamson                 munmap(block->host, block->length);
142304b16653SAlex Williamson                 close(block->fd);
1424089f3f76SStefan Weil #endif
142504b16653SAlex Williamson             } else {
1426e7a09b92SPaolo Bonzini                 qemu_anon_ram_free(block->host, block->length);
142704b16653SAlex Williamson             }
14287267c094SAnthony Liguori             g_free(block);
1429b2a8658eSUmesh Deshpande             break;
143004b16653SAlex Williamson         }
143104b16653SAlex Williamson     }
1432b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
143304b16653SAlex Williamson 
1434e9a1ab19Sbellard }
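
/*
 * The four branches above mirror the allocation paths: RAM_PREALLOC
 * blocks were provided by the caller and are not freed here, Xen
 * blocks only drop their map-cache entry, fd-backed blocks from
 * file_ram_alloc() are munmap()ed and closed, and everything else
 * goes back through qemu_anon_ram_free().
 */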
1435e9a1ab19Sbellard 
1436cd19cfa2SHuang Ying #ifndef _WIN32
1437cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1438cd19cfa2SHuang Ying {
1439cd19cfa2SHuang Ying     RAMBlock *block;
1440cd19cfa2SHuang Ying     ram_addr_t offset;
1441cd19cfa2SHuang Ying     int flags;
1442cd19cfa2SHuang Ying     void *area, *vaddr;
1443cd19cfa2SHuang Ying 
1444a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1445cd19cfa2SHuang Ying         offset = addr - block->offset;
1446cd19cfa2SHuang Ying         if (offset < block->length) {
1447cd19cfa2SHuang Ying             vaddr = block->host + offset;
14487bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1449cd19cfa2SHuang Ying                 ;
1450dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1451dfeaf2abSMarkus Armbruster                 abort();
1452cd19cfa2SHuang Ying             } else {
1453cd19cfa2SHuang Ying                 flags = MAP_FIXED;
1454cd19cfa2SHuang Ying                 munmap(vaddr, length);
14553435f395SMarkus Armbruster                 if (block->fd >= 0) {
1456dbcb8981SPaolo Bonzini                     flags |= (block->flags & RAM_SHARED ?
1457dbcb8981SPaolo Bonzini                               MAP_SHARED : MAP_PRIVATE);
1458cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1459cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1460cd19cfa2SHuang Ying                 } else {
14612eb9fbaaSMarkus Armbruster                     /*
14622eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
14632eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
14642eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
14652eb9fbaaSMarkus Armbruster                      */
14662eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
14672eb9fbaaSMarkus Armbruster 
1468cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1469cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1470cd19cfa2SHuang Ying                                 flags, -1, 0);
1471cd19cfa2SHuang Ying                 }
1472cd19cfa2SHuang Ying                 if (area != vaddr) {
1473f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1474f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1475cd19cfa2SHuang Ying                             length, addr);
1476cd19cfa2SHuang Ying                     exit(1);
1477cd19cfa2SHuang Ying                 }
14788490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1479ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1480cd19cfa2SHuang Ying             }
1481cd19cfa2SHuang Ying             return;
1482cd19cfa2SHuang Ying         }
1483cd19cfa2SHuang Ying     }
1484cd19cfa2SHuang Ying }
1485cd19cfa2SHuang Ying #endif /* !_WIN32 */
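
/*
 * qemu_ram_remap() discards the old contents of [addr, addr + length)
 * by placing a fresh MAP_FIXED mapping over the same virtual range,
 * file-backed or anonymous to match how the block was allocated, and
 * then reapplies the merging and dump madvise settings.
 */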
1486cd19cfa2SHuang Ying 
1487a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1488a35ba7beSPaolo Bonzini {
1489a35ba7beSPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
1490a35ba7beSPaolo Bonzini 
1491a35ba7beSPaolo Bonzini     return block->fd;
1492a35ba7beSPaolo Bonzini }
1493a35ba7beSPaolo Bonzini 
14943fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
14953fd74b84SDamjan Marion {
14963fd74b84SDamjan Marion     RAMBlock *block = qemu_get_ram_block(addr);
14973fd74b84SDamjan Marion 
14983fd74b84SDamjan Marion     return block->host;
14993fd74b84SDamjan Marion }
15003fd74b84SDamjan Marion 
15011b5ec234SPaolo Bonzini /* Return a host pointer to RAM allocated with qemu_ram_alloc.
15021b5ec234SPaolo Bonzini    With the exception of the softmmu code in this file, this should
15031b5ec234SPaolo Bonzini    only be used for local memory (e.g. video RAM) that the device owns
15041b5ec234SPaolo Bonzini    and knows it will not access beyond the end of the block.
15051b5ec234SPaolo Bonzini 
15061b5ec234SPaolo Bonzini    It should not be used for general purpose DMA.
15071b5ec234SPaolo Bonzini    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
15081b5ec234SPaolo Bonzini  */
15091b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
15101b5ec234SPaolo Bonzini {
15111b5ec234SPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
15121b5ec234SPaolo Bonzini 
1513868bb33fSJan Kiszka     if (xen_enabled()) {
1514432d268cSJun Nakajima         /* We need to check whether the requested address is in RAM
1515432d268cSJun Nakajima          * because we don't want to map the whole guest memory in QEMU.
1516712c2b41SStefano Stabellini          * In that case, just map up to the end of the page.
1517432d268cSJun Nakajima          */
1518432d268cSJun Nakajima         if (block->offset == 0) {
1519e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1520432d268cSJun Nakajima         } else if (block->host == NULL) {
1521e41d7c69SJan Kiszka             block->host =
1522e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1523432d268cSJun Nakajima         }
1524432d268cSJun Nakajima     }
1525f471a17eSAlex Williamson     return block->host + (addr - block->offset);
152694a6b54fSpbrook }
1527f471a17eSAlex Williamson 
152838bee5dcSStefano Stabellini /* Return a host pointer to guest RAM.  Similar to qemu_get_ram_ptr,
152938bee5dcSStefano Stabellini  * but takes a size argument. */
1530cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
153138bee5dcSStefano Stabellini {
15328ab934f9SStefano Stabellini     if (*size == 0) {
15338ab934f9SStefano Stabellini         return NULL;
15348ab934f9SStefano Stabellini     }
1535868bb33fSJan Kiszka     if (xen_enabled()) {
1536e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1537868bb33fSJan Kiszka     } else {
153838bee5dcSStefano Stabellini         RAMBlock *block;
153938bee5dcSStefano Stabellini 
1540a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
154138bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
154238bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
154338bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
154438bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
154538bee5dcSStefano Stabellini             }
154638bee5dcSStefano Stabellini         }
154738bee5dcSStefano Stabellini 
154838bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
154938bee5dcSStefano Stabellini         abort();
155038bee5dcSStefano Stabellini     }
155138bee5dcSStefano Stabellini }
155238bee5dcSStefano Stabellini 
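
/*
 * Unlike qemu_get_ram_ptr(), this length variant clamps *size to the
 * end of the containing RAM block, so callers must re-check *size on
 * return rather than assume the whole requested range was mapped.
 */
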
15537443b437SPaolo Bonzini /* Some of the softmmu routines need to translate from a host pointer
15547443b437SPaolo Bonzini    (typically a TLB entry) back to a ram offset.  */
15551b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
15565579c7f3Spbrook {
155794a6b54fSpbrook     RAMBlock *block;
155894a6b54fSpbrook     uint8_t *host = ptr;
155994a6b54fSpbrook 
1560868bb33fSJan Kiszka     if (xen_enabled()) {
1561e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
15621b5ec234SPaolo Bonzini         return qemu_get_ram_block(*ram_addr)->mr;
1563712c2b41SStefano Stabellini     }
1564712c2b41SStefano Stabellini 
156523887b79SPaolo Bonzini     block = ram_list.mru_block;
156623887b79SPaolo Bonzini     if (block && block->host && host - block->host < block->length) {
156723887b79SPaolo Bonzini         goto found;
156823887b79SPaolo Bonzini     }
156923887b79SPaolo Bonzini 
1570a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1571432d268cSJun Nakajima         /* This case appears when the block is not mapped. */
1572432d268cSJun Nakajima         if (block->host == NULL) {
1573432d268cSJun Nakajima             continue;
1574432d268cSJun Nakajima         }
1575f471a17eSAlex Williamson         if (host - block->host < block->length) {
157623887b79SPaolo Bonzini             goto found;
157794a6b54fSpbrook         }
1578f471a17eSAlex Williamson     }
1579432d268cSJun Nakajima 
15801b5ec234SPaolo Bonzini     return NULL;
158123887b79SPaolo Bonzini 
158223887b79SPaolo Bonzini found:
158323887b79SPaolo Bonzini     *ram_addr = block->offset + (host - block->host);
15841b5ec234SPaolo Bonzini     return block->mr;
1585e890261fSMarcelo Tosatti }
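
/*
 * The lookup above tries ram_list.mru_block first, so repeated
 * translations into the same block (the common case) skip the list
 * walk.  The bounds test also copes with pointers below block->host:
 * the difference then converts to a huge unsigned value and fails the
 * "< block->length" comparison.
 */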
1586f471a17eSAlex Williamson 
1587a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
15880e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
15891ccde1cbSbellard {
159052159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
15910e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
15923a7d929eSbellard     }
15930e0df1e2SAvi Kivity     switch (size) {
15940e0df1e2SAvi Kivity     case 1:
15955579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
15960e0df1e2SAvi Kivity         break;
15970e0df1e2SAvi Kivity     case 2:
15985579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
15990e0df1e2SAvi Kivity         break;
16000e0df1e2SAvi Kivity     case 4:
16015579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
16020e0df1e2SAvi Kivity         break;
16030e0df1e2SAvi Kivity     default:
16040e0df1e2SAvi Kivity         abort();
16050e0df1e2SAvi Kivity     }
16066886867eSPaolo Bonzini     cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1607f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1608f23db169Sbellard        flushed */
1609a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
16104917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
161193afeadeSAndreas Färber         tlb_set_dirty(env, current_cpu->mem_io_vaddr);
16124917cf44SAndreas Färber     }
16131ccde1cbSbellard }
16141ccde1cbSbellard 
1615b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1616b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1617b018ddf6SPaolo Bonzini {
1618b018ddf6SPaolo Bonzini     return is_write;
1619b018ddf6SPaolo Bonzini }
1620b018ddf6SPaolo Bonzini 
16210e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
16220e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1623b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
16240e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
16251ccde1cbSbellard };
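
/*
 * Write path for a page whose DIRTY_MEMORY_CODE bit is still clean:
 * notdirty_mem_write() invalidates any TBs derived from the page,
 * performs the store through qemu_get_ram_ptr(), marks the range
 * dirty, and, once the page no longer tests clean, calls
 * tlb_set_dirty() so later writes go straight to RAM instead of
 * through this slow path.
 */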
16261ccde1cbSbellard 
16270f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
1628b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
16290f459d16Spbrook {
163093afeadeSAndreas Färber     CPUState *cpu = current_cpu;
163193afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
163206d55cc1Saliguori     target_ulong pc, cs_base;
16330f459d16Spbrook     target_ulong vaddr;
1634a1d1bb31Saliguori     CPUWatchpoint *wp;
163506d55cc1Saliguori     int cpu_flags;
16360f459d16Spbrook 
1637ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
163806d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
163906d55cc1Saliguori          * the debug interrupt so that it will trigger after the
164006d55cc1Saliguori          * current instruction. */
164193afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
164206d55cc1Saliguori         return;
164306d55cc1Saliguori     }
164493afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1645ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1646b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
1647b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
16486e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
1649ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
1650ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
1651239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
165206d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
165327103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
16545638d180SAndreas Färber                     cpu_loop_exit(cpu);
165506d55cc1Saliguori                 } else {
165606d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1657648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
16580ea8cb88SAndreas Färber                     cpu_resume_from_signal(cpu, NULL);
16590f459d16Spbrook                 }
1660488d6577SMax Filippov             }
16616e140f28Saliguori         } else {
16626e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
16636e140f28Saliguori         }
16640f459d16Spbrook     }
16650f459d16Spbrook }
16660f459d16Spbrook 
16676658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
16686658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
16696658ffb8Spbrook    phys routines.  */
1670a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
16711ec9b909SAvi Kivity                                unsigned size)
16726658ffb8Spbrook {
16731ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
16741ec9b909SAvi Kivity     switch (size) {
16752c17449bSEdgar E. Iglesias     case 1: return ldub_phys(&address_space_memory, addr);
167641701aa4SEdgar E. Iglesias     case 2: return lduw_phys(&address_space_memory, addr);
1677fdfba1a2SEdgar E. Iglesias     case 4: return ldl_phys(&address_space_memory, addr);
16781ec9b909SAvi Kivity     default: abort();
16791ec9b909SAvi Kivity     }
16806658ffb8Spbrook }
16816658ffb8Spbrook 
1682a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
16831ec9b909SAvi Kivity                             uint64_t val, unsigned size)
16846658ffb8Spbrook {
16851ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
16861ec9b909SAvi Kivity     switch (size) {
168767364150SMax Filippov     case 1:
1688db3be60dSEdgar E. Iglesias         stb_phys(&address_space_memory, addr, val);
168967364150SMax Filippov         break;
169067364150SMax Filippov     case 2:
16915ce5944dSEdgar E. Iglesias         stw_phys(&address_space_memory, addr, val);
169267364150SMax Filippov         break;
169367364150SMax Filippov     case 4:
1694ab1da857SEdgar E. Iglesias         stl_phys(&address_space_memory, addr, val);
169567364150SMax Filippov         break;
16961ec9b909SAvi Kivity     default: abort();
16971ec9b909SAvi Kivity     }
16986658ffb8Spbrook }
16996658ffb8Spbrook 
17001ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
17011ec9b909SAvi Kivity     .read = watch_mem_read,
17021ec9b909SAvi Kivity     .write = watch_mem_write,
17031ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
17046658ffb8Spbrook };
17056658ffb8Spbrook 
1706a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
170770c68e44SAvi Kivity                              unsigned len)
1708db7b5426Sblueswir1 {
1709acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1710acc9d80bSJan Kiszka     uint8_t buf[4];
1711791af8c8SPaolo Bonzini 
1712db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1713016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1714acc9d80bSJan Kiszka            subpage, len, addr);
1715db7b5426Sblueswir1 #endif
1716acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1717acc9d80bSJan Kiszka     switch (len) {
1718acc9d80bSJan Kiszka     case 1:
1719acc9d80bSJan Kiszka         return ldub_p(buf);
1720acc9d80bSJan Kiszka     case 2:
1721acc9d80bSJan Kiszka         return lduw_p(buf);
1722acc9d80bSJan Kiszka     case 4:
1723acc9d80bSJan Kiszka         return ldl_p(buf);
1724acc9d80bSJan Kiszka     default:
1725acc9d80bSJan Kiszka         abort();
1726acc9d80bSJan Kiszka     }
1727db7b5426Sblueswir1 }
1728db7b5426Sblueswir1 
1729a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
173070c68e44SAvi Kivity                           uint64_t value, unsigned len)
1731db7b5426Sblueswir1 {
1732acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1733acc9d80bSJan Kiszka     uint8_t buf[4];
1734acc9d80bSJan Kiszka 
1735db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1736016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1737acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1738acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1739db7b5426Sblueswir1 #endif
1740acc9d80bSJan Kiszka     switch (len) {
1741acc9d80bSJan Kiszka     case 1:
1742acc9d80bSJan Kiszka         stb_p(buf, value);
1743acc9d80bSJan Kiszka         break;
1744acc9d80bSJan Kiszka     case 2:
1745acc9d80bSJan Kiszka         stw_p(buf, value);
1746acc9d80bSJan Kiszka         break;
1747acc9d80bSJan Kiszka     case 4:
1748acc9d80bSJan Kiszka         stl_p(buf, value);
1749acc9d80bSJan Kiszka         break;
1750acc9d80bSJan Kiszka     default:
1751acc9d80bSJan Kiszka         abort();
1752acc9d80bSJan Kiszka     }
1753acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1754db7b5426Sblueswir1 }
1755db7b5426Sblueswir1 
1756c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1757016e9d62SAmos Kong                             unsigned len, bool is_write)
1758c353e4ccSPaolo Bonzini {
1759acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1760c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1761016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1762acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
1763c353e4ccSPaolo Bonzini #endif
1764c353e4ccSPaolo Bonzini 
1765acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1766016e9d62SAmos Kong                                       len, is_write);
1767c353e4ccSPaolo Bonzini }
1768c353e4ccSPaolo Bonzini 
176970c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
177070c68e44SAvi Kivity     .read = subpage_read,
177170c68e44SAvi Kivity     .write = subpage_write,
1772c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
177370c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
1774db7b5426Sblueswir1 };
1775db7b5426Sblueswir1 
1776c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
17775312bd8bSAvi Kivity                              uint16_t section)
1778db7b5426Sblueswir1 {
1779db7b5426Sblueswir1     int idx, eidx;
1780db7b5426Sblueswir1 
1781db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1782db7b5426Sblueswir1         return -1;
1783db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1784db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1785db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1786016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1787016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
1788db7b5426Sblueswir1 #endif
1789db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
17905312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1791db7b5426Sblueswir1     }
1792db7b5426Sblueswir1 
1793db7b5426Sblueswir1     return 0;
1794db7b5426Sblueswir1 }
1795db7b5426Sblueswir1 
1796acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1797db7b5426Sblueswir1 {
1798c227f099SAnthony Liguori     subpage_t *mmio;
1799db7b5426Sblueswir1 
18007267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
18011eec614bSaliguori 
1802acc9d80bSJan Kiszka     mmio->as = as;
1803db7b5426Sblueswir1     mmio->base = base;
18042c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1805b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
1806b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1807db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1808016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1809016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
1810db7b5426Sblueswir1 #endif
1811b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1812db7b5426Sblueswir1 
1813db7b5426Sblueswir1     return mmio;
1814db7b5426Sblueswir1 }
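
/*
 * A subpage covers one target page whose contents belong to more than
 * one MemoryRegionSection.  subpage_init() maps the whole page to
 * PHYS_SECTION_UNASSIGNED and later subpage_register() calls override
 * the sub-ranges that are actually backed; accesses are then bounced
 * through address_space_read()/address_space_write() on the owning
 * address space, as the ops above show.
 */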
1815db7b5426Sblueswir1 
1816a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1817a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
18185312bd8bSAvi Kivity {
1819a656e22fSPeter Crosthwaite     assert(as);
18205312bd8bSAvi Kivity     MemoryRegionSection section = {
1821a656e22fSPeter Crosthwaite         .address_space = as,
18225312bd8bSAvi Kivity         .mr = mr,
18235312bd8bSAvi Kivity         .offset_within_address_space = 0,
18245312bd8bSAvi Kivity         .offset_within_region = 0,
1825052e87b0SPaolo Bonzini         .size = int128_2_64(),
18265312bd8bSAvi Kivity     };
18275312bd8bSAvi Kivity 
182853cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
18295312bd8bSAvi Kivity }
18305312bd8bSAvi Kivity 
183177717094SEdgar E. Iglesias MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1832aa102231SAvi Kivity {
183377717094SEdgar E. Iglesias     return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1834aa102231SAvi Kivity }
1835aa102231SAvi Kivity 
1836e9179ce1SAvi Kivity static void io_mem_init(void)
1837e9179ce1SAvi Kivity {
18381f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
18392c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
18401f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
18412c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
18421f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
18432c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
18441f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
1845e9179ce1SAvi Kivity }
1846e9179ce1SAvi Kivity 
1847ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1848ac1970fbSAvi Kivity {
184989ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
185053cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
185153cb28cbSMarcel Apfelbaum     uint16_t n;
185253cb28cbSMarcel Apfelbaum 
1853a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
185453cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
1855a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
185653cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
1857a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
185853cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
1859a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
186053cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
186100752703SPaolo Bonzini 
18629736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
186300752703SPaolo Bonzini     d->as = as;
186400752703SPaolo Bonzini     as->next_dispatch = d;
186500752703SPaolo Bonzini }
186600752703SPaolo Bonzini 
186700752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
186800752703SPaolo Bonzini {
186900752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
18700475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
18710475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
1872ac1970fbSAvi Kivity 
187353cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
1874b35ba30fSMichael S. Tsirkin 
18750475d94fSPaolo Bonzini     as->dispatch = next;
187653cb28cbSMarcel Apfelbaum 
187753cb28cbSMarcel Apfelbaum     if (cur) {
187853cb28cbSMarcel Apfelbaum         phys_sections_free(&cur->map);
18790475d94fSPaolo Bonzini         g_free(cur);
1880ac1970fbSAvi Kivity     }
18819affd6fcSPaolo Bonzini }
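
/*
 * Topology updates are two-phase: mem_begin() builds a fresh
 * AddressSpaceDispatch in as->next_dispatch while readers keep using
 * as->dispatch, then mem_commit() compacts the new page table,
 * publishes it, and frees the old one.
 */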
18829affd6fcSPaolo Bonzini 
18831d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
188450c1e149SAvi Kivity {
1885182735efSAndreas Färber     CPUState *cpu;
1886117712c3SAvi Kivity 
1887117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
1888117712c3SAvi Kivity        reset the modified entries */
1889117712c3SAvi Kivity     /* XXX: slow ! */
1890bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
189133bde2e1SEdgar E. Iglesias         /* FIXME: Disentangle the cpu.h circular file deps so we can
189233bde2e1SEdgar E. Iglesias            directly get the right CPU from the listener.  */
189333bde2e1SEdgar E. Iglesias         if (cpu->tcg_as_listener != listener) {
189433bde2e1SEdgar E. Iglesias             continue;
189533bde2e1SEdgar E. Iglesias         }
189600c8cb0aSAndreas Färber         tlb_flush(cpu, 1);
1897117712c3SAvi Kivity     }
189850c1e149SAvi Kivity }
189950c1e149SAvi Kivity 
190093632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
190193632747SAvi Kivity {
1902981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(true);
190393632747SAvi Kivity }
190493632747SAvi Kivity 
190593632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
190693632747SAvi Kivity {
1907981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(false);
190893632747SAvi Kivity }
190993632747SAvi Kivity 
191093632747SAvi Kivity static MemoryListener core_memory_listener = {
191193632747SAvi Kivity     .log_global_start = core_log_global_start,
191293632747SAvi Kivity     .log_global_stop = core_log_global_stop,
1913ac1970fbSAvi Kivity     .priority = 1,
191493632747SAvi Kivity };
191593632747SAvi Kivity 
1916ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
1917ac1970fbSAvi Kivity {
191800752703SPaolo Bonzini     as->dispatch = NULL;
191989ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
1920ac1970fbSAvi Kivity         .begin = mem_begin,
192100752703SPaolo Bonzini         .commit = mem_commit,
1922ac1970fbSAvi Kivity         .region_add = mem_add,
1923ac1970fbSAvi Kivity         .region_nop = mem_add,
1924ac1970fbSAvi Kivity         .priority = 0,
1925ac1970fbSAvi Kivity     };
192689ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
1927ac1970fbSAvi Kivity }
1928ac1970fbSAvi Kivity 
192983f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
193083f3c251SAvi Kivity {
193183f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
193283f3c251SAvi Kivity 
193389ae337aSPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
193483f3c251SAvi Kivity     g_free(d);
193583f3c251SAvi Kivity     as->dispatch = NULL;
193683f3c251SAvi Kivity }
193783f3c251SAvi Kivity 
193862152b8aSAvi Kivity static void memory_map_init(void)
193962152b8aSAvi Kivity {
19407267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
194103f49957SPaolo Bonzini 
194257271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
19437dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
1944309cb471SAvi Kivity 
19457267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
19463bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
19473bb28b72SJan Kiszka                           65536);
19487dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
194993632747SAvi Kivity 
1950f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
19512641689aSliguang }
195262152b8aSAvi Kivity 
195362152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
195462152b8aSAvi Kivity {
195562152b8aSAvi Kivity     return system_memory;
195662152b8aSAvi Kivity }
195762152b8aSAvi Kivity 
1958309cb471SAvi Kivity MemoryRegion *get_system_io(void)
1959309cb471SAvi Kivity {
1960309cb471SAvi Kivity     return system_io;
1961309cb471SAvi Kivity }
1962309cb471SAvi Kivity 
1963e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
1964e2eef170Spbrook 
196513eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
196613eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
1967f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1968a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
196913eb76e0Sbellard {
197013eb76e0Sbellard     int l, flags;
197113eb76e0Sbellard     target_ulong page;
197253a5960aSpbrook     void * p;
197313eb76e0Sbellard 
197413eb76e0Sbellard     while (len > 0) {
197513eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
197613eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
197713eb76e0Sbellard         if (l > len)
197813eb76e0Sbellard             l = len;
197913eb76e0Sbellard         flags = page_get_flags(page);
198013eb76e0Sbellard         if (!(flags & PAGE_VALID))
1981a68fe89cSPaul Brook             return -1;
198213eb76e0Sbellard         if (is_write) {
198313eb76e0Sbellard             if (!(flags & PAGE_WRITE))
1984a68fe89cSPaul Brook                 return -1;
1985579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
198672fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1987a68fe89cSPaul Brook                 return -1;
198872fb7daaSaurel32             memcpy(p, buf, l);
198972fb7daaSaurel32             unlock_user(p, addr, l);
199013eb76e0Sbellard         } else {
199113eb76e0Sbellard             if (!(flags & PAGE_READ))
1992a68fe89cSPaul Brook                 return -1;
1993579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
199472fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1995a68fe89cSPaul Brook                 return -1;
199672fb7daaSaurel32             memcpy(buf, p, l);
19975b257578Saurel32             unlock_user(p, addr, 0);
199813eb76e0Sbellard         }
199913eb76e0Sbellard         len -= l;
200013eb76e0Sbellard         buf += l;
200113eb76e0Sbellard         addr += l;
200213eb76e0Sbellard     }
2003a68fe89cSPaul Brook     return 0;
200413eb76e0Sbellard }
20058df1cd07Sbellard 
200613eb76e0Sbellard #else
200751d7a9ebSAnthony PERARD 
2008a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
2009a8170e5eSAvi Kivity                                      hwaddr length)
201051d7a9ebSAnthony PERARD {
2011a2cd8c85SJuan Quintela     if (cpu_physical_memory_is_clean(addr)) {
201251d7a9ebSAnthony PERARD         /* invalidate code */
201351d7a9ebSAnthony PERARD         tb_invalidate_phys_page_range(addr, addr + length, 0);
201451d7a9ebSAnthony PERARD         /* set dirty bit */
20156886867eSPaolo Bonzini         cpu_physical_memory_set_dirty_range_nocode(addr, length);
201651d7a9ebSAnthony PERARD     }
2017e226939dSAnthony PERARD     xen_modified_memory(addr, length);
201851d7a9ebSAnthony PERARD }
201951d7a9ebSAnthony PERARD 
202023326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
202182f2563fSPaolo Bonzini {
2022e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
202323326164SRichard Henderson 
202423326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
202523326164SRichard Henderson        otherwise specified.  */
202623326164SRichard Henderson     if (access_size_max == 0) {
202723326164SRichard Henderson         access_size_max = 4;
202882f2563fSPaolo Bonzini     }
202923326164SRichard Henderson 
203023326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
203123326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
203223326164SRichard Henderson         unsigned align_size_max = addr & -addr;
203323326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
203423326164SRichard Henderson             access_size_max = align_size_max;
203523326164SRichard Henderson         }
203623326164SRichard Henderson     }
203723326164SRichard Henderson 
203823326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
203923326164SRichard Henderson     if (l > access_size_max) {
204023326164SRichard Henderson         l = access_size_max;
204123326164SRichard Henderson     }
2042098178f2SPaolo Bonzini     if (l & (l - 1)) {
2043098178f2SPaolo Bonzini         l = 1 << (qemu_fls(l) - 1);
2044098178f2SPaolo Bonzini     }
204523326164SRichard Henderson 
204623326164SRichard Henderson     return l;
204782f2563fSPaolo Bonzini }
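
/*
 * Worked example: a write of l = 8 bytes at addr = 0x1006 to a region
 * with no declared valid.max_access_size and no unaligned support is
 * capped first to the default maximum of 4 bytes, then by alignment
 * (0x1006 & -0x1006 == 2) to 2 bytes, so the caller's loop issues
 * 2-byte accesses until the address becomes better aligned.
 */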
204882f2563fSPaolo Bonzini 
2049fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2050ac1970fbSAvi Kivity                       int len, bool is_write)
205113eb76e0Sbellard {
2052149f54b5SPaolo Bonzini     hwaddr l;
205313eb76e0Sbellard     uint8_t *ptr;
2054791af8c8SPaolo Bonzini     uint64_t val;
2055149f54b5SPaolo Bonzini     hwaddr addr1;
20565c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2057fd8aaa76SPaolo Bonzini     bool error = false;
205813eb76e0Sbellard 
205913eb76e0Sbellard     while (len > 0) {
206013eb76e0Sbellard         l = len;
20615c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
206213eb76e0Sbellard 
206313eb76e0Sbellard         if (is_write) {
20645c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
20655c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
20664917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
20676a00d601Sbellard                    potential bugs */
206823326164SRichard Henderson                 switch (l) {
206923326164SRichard Henderson                 case 8:
207023326164SRichard Henderson                     /* 64 bit write access */
207123326164SRichard Henderson                     val = ldq_p(buf);
207223326164SRichard Henderson                     error |= io_mem_write(mr, addr1, val, 8);
207323326164SRichard Henderson                     break;
207423326164SRichard Henderson                 case 4:
20751c213d19Sbellard                     /* 32 bit write access */
2076c27004ecSbellard                     val = ldl_p(buf);
20775c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
207823326164SRichard Henderson                     break;
207923326164SRichard Henderson                 case 2:
20801c213d19Sbellard                     /* 16 bit write access */
2081c27004ecSbellard                     val = lduw_p(buf);
20825c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
208323326164SRichard Henderson                     break;
208423326164SRichard Henderson                 case 1:
20851c213d19Sbellard                     /* 8 bit write access */
2086c27004ecSbellard                     val = ldub_p(buf);
20875c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
208823326164SRichard Henderson                     break;
208923326164SRichard Henderson                 default:
209023326164SRichard Henderson                     abort();
209113eb76e0Sbellard                 }
20922bbfa05dSPaolo Bonzini             } else {
20935c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
209413eb76e0Sbellard                 /* RAM case */
20955579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
209613eb76e0Sbellard                 memcpy(ptr, buf, l);
209751d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
20983a7d929eSbellard             }
209913eb76e0Sbellard         } else {
21005c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
210113eb76e0Sbellard                 /* I/O case */
21025c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
210323326164SRichard Henderson                 switch (l) {
210423326164SRichard Henderson                 case 8:
210523326164SRichard Henderson                     /* 64 bit read access */
210623326164SRichard Henderson                     error |= io_mem_read(mr, addr1, &val, 8);
210723326164SRichard Henderson                     stq_p(buf, val);
210823326164SRichard Henderson                     break;
210923326164SRichard Henderson                 case 4:
211013eb76e0Sbellard                     /* 32 bit read access */
21115c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
2112c27004ecSbellard                     stl_p(buf, val);
211323326164SRichard Henderson                     break;
211423326164SRichard Henderson                 case 2:
211513eb76e0Sbellard                     /* 16 bit read access */
21165c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
2117c27004ecSbellard                     stw_p(buf, val);
211823326164SRichard Henderson                     break;
211923326164SRichard Henderson                 case 1:
21201c213d19Sbellard                     /* 8 bit read access */
21215c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
2122c27004ecSbellard                     stb_p(buf, val);
212323326164SRichard Henderson                     break;
212423326164SRichard Henderson                 default:
212523326164SRichard Henderson                     abort();
212613eb76e0Sbellard                 }
212713eb76e0Sbellard             } else {
212813eb76e0Sbellard                 /* RAM case */
21295c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2130f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
213113eb76e0Sbellard             }
213213eb76e0Sbellard         }
213313eb76e0Sbellard         len -= l;
213413eb76e0Sbellard         buf += l;
213513eb76e0Sbellard         addr += l;
213613eb76e0Sbellard     }
2137fd8aaa76SPaolo Bonzini 
2138fd8aaa76SPaolo Bonzini     return error;
213913eb76e0Sbellard }
21408df1cd07Sbellard 
2141fd8aaa76SPaolo Bonzini bool address_space_write(AddressSpace *as, hwaddr addr,
2142ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
2143ac1970fbSAvi Kivity {
2144fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2145ac1970fbSAvi Kivity }
2146ac1970fbSAvi Kivity 
2147fd8aaa76SPaolo Bonzini bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2148ac1970fbSAvi Kivity {
2149fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, buf, len, false);
2150ac1970fbSAvi Kivity }
2151ac1970fbSAvi Kivity 
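/*
 * Illustrative sketch: copying a guest buffer through the two wrappers
 * above.  Only address_space_read()/address_space_write() and the global
 * address_space_memory come from this file; the function name and chunk
 * size are hypothetical, and QEMU's usual headers are assumed.
 */
static bool example_copy_guest_buffer(hwaddr src, hwaddr dst, int len)
{
    uint8_t tmp[512];
    bool err = false;

    while (len > 0) {
        int l = MIN(len, (int)sizeof(tmp));

        /* Each call returns true if any part of the access hit an
         * invalid region or a device that reported an error. */
        err |= address_space_read(&address_space_memory, src, tmp, l);
        err |= address_space_write(&address_space_memory, dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
    return err;
}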
2152ac1970fbSAvi Kivity 
2153a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2154ac1970fbSAvi Kivity                             int len, int is_write)
2155ac1970fbSAvi Kivity {
2156fd8aaa76SPaolo Bonzini     address_space_rw(&address_space_memory, addr, buf, len, is_write);
2157ac1970fbSAvi Kivity }
2158ac1970fbSAvi Kivity 
2159582b55a9SAlexander Graf enum write_rom_type {
2160582b55a9SAlexander Graf     WRITE_DATA,
2161582b55a9SAlexander Graf     FLUSH_CACHE,
2162582b55a9SAlexander Graf };
2163582b55a9SAlexander Graf 
21642a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2165582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2166d0ecd2aaSbellard {
2167149f54b5SPaolo Bonzini     hwaddr l;
2168d0ecd2aaSbellard     uint8_t *ptr;
2169149f54b5SPaolo Bonzini     hwaddr addr1;
21705c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2171d0ecd2aaSbellard 
2172d0ecd2aaSbellard     while (len > 0) {
2173d0ecd2aaSbellard         l = len;
21742a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2175d0ecd2aaSbellard 
21765c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
21775c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2178d0ecd2aaSbellard             /* do nothing */
2179d0ecd2aaSbellard         } else {
21805c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2181d0ecd2aaSbellard             /* ROM/RAM case */
21825579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2183582b55a9SAlexander Graf             switch (type) {
2184582b55a9SAlexander Graf             case WRITE_DATA:
2185d0ecd2aaSbellard                 memcpy(ptr, buf, l);
218651d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
2187582b55a9SAlexander Graf                 break;
2188582b55a9SAlexander Graf             case FLUSH_CACHE:
2189582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2190582b55a9SAlexander Graf                 break;
2191582b55a9SAlexander Graf             }
2192d0ecd2aaSbellard         }
2193d0ecd2aaSbellard         len -= l;
2194d0ecd2aaSbellard         buf += l;
2195d0ecd2aaSbellard         addr += l;
2196d0ecd2aaSbellard     }
2197d0ecd2aaSbellard }
2198d0ecd2aaSbellard 
2199582b55a9SAlexander Graf /* used for ROM loading: can write to RAM and ROM */
22002a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2201582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2202582b55a9SAlexander Graf {
22032a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2204582b55a9SAlexander Graf }
2205582b55a9SAlexander Graf 
2206582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2207582b55a9SAlexander Graf {
2208582b55a9SAlexander Graf     /*
2209582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2210582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2211582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2212582b55a9SAlexander Graf      * the host's instruction cache at least.
2213582b55a9SAlexander Graf      */
2214582b55a9SAlexander Graf     if (tcg_enabled()) {
2215582b55a9SAlexander Graf         return;
2216582b55a9SAlexander Graf     }
2217582b55a9SAlexander Graf 
22182a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
22192a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2220582b55a9SAlexander Graf }
2221582b55a9SAlexander Graf 
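/*
 * Illustrative sketch: how a board model might load a firmware blob.
 * cpu_physical_memory_write_rom() also writes into ROM regions, and the
 * icache flush is a no-op under TCG (see above) but needed before a
 * KVM/Xen guest executes the freshly written code.  The blob parameters
 * are hypothetical.
 */
static void example_load_firmware(const uint8_t *blob, int blob_len,
                                  hwaddr rom_base)
{
    cpu_physical_memory_write_rom(&address_space_memory, rom_base,
                                  blob, blob_len);
    cpu_flush_icache_range(rom_base, blob_len);
}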
22226d16c2f8Saliguori typedef struct {
2223d3e71559SPaolo Bonzini     MemoryRegion *mr;
22246d16c2f8Saliguori     void *buffer;
2225a8170e5eSAvi Kivity     hwaddr addr;
2226a8170e5eSAvi Kivity     hwaddr len;
22276d16c2f8Saliguori } BounceBuffer;
22286d16c2f8Saliguori 
22296d16c2f8Saliguori static BounceBuffer bounce;
22306d16c2f8Saliguori 
2231ba223c29Saliguori typedef struct MapClient {
2232ba223c29Saliguori     void *opaque;
2233ba223c29Saliguori     void (*callback)(void *opaque);
223472cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2235ba223c29Saliguori } MapClient;
2236ba223c29Saliguori 
223772cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
223872cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2239ba223c29Saliguori 
2240ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2241ba223c29Saliguori {
22427267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
2243ba223c29Saliguori 
2244ba223c29Saliguori     client->opaque = opaque;
2245ba223c29Saliguori     client->callback = callback;
224672cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
2247ba223c29Saliguori     return client;
2248ba223c29Saliguori }
2249ba223c29Saliguori 
22508b9c99d9SBlue Swirl static void cpu_unregister_map_client(void *_client)
2251ba223c29Saliguori {
2252ba223c29Saliguori     MapClient *client = (MapClient *)_client;
2253ba223c29Saliguori 
225472cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
22557267c094SAnthony Liguori     g_free(client);
2256ba223c29Saliguori }
2257ba223c29Saliguori 
2258ba223c29Saliguori static void cpu_notify_map_clients(void)
2259ba223c29Saliguori {
2260ba223c29Saliguori     MapClient *client;
2261ba223c29Saliguori 
226272cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
226372cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2264ba223c29Saliguori         client->callback(client->opaque);
226534d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
2266ba223c29Saliguori     }
2267ba223c29Saliguori }
2268ba223c29Saliguori 
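/*
 * Illustrative sketch of the retry protocol around the single bounce
 * buffer: when address_space_map() returns NULL because bounce.buffer is
 * busy, a caller registers a callback that cpu_notify_map_clients() will
 * invoke (and auto-unregister) once the buffer is released.  The
 * example_* names are hypothetical.
 */
static void example_map_retry_cb(void *opaque);

static void *example_try_map(AddressSpace *as, hwaddr addr, hwaddr *plen,
                             void *opaque)
{
    void *p = address_space_map(as, addr, plen, false);

    if (!p) {
        /* Bounce buffer in use: ask to be notified when it frees up. */
        cpu_register_map_client(opaque, example_map_retry_cb);
    }
    return p;
}

static void example_map_retry_cb(void *opaque)
{
    /* A real device model would restart its pending transfer here;
     * the client entry has already been removed by the notifier. */
}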
226951644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
227051644ab7SPaolo Bonzini {
22715c8a00ceSPaolo Bonzini     MemoryRegion *mr;
227251644ab7SPaolo Bonzini     hwaddr l, xlat;
227351644ab7SPaolo Bonzini 
227451644ab7SPaolo Bonzini     while (len > 0) {
227551644ab7SPaolo Bonzini         l = len;
22765c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
22775c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
22785c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
22795c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
228051644ab7SPaolo Bonzini                 return false;
228151644ab7SPaolo Bonzini             }
228251644ab7SPaolo Bonzini         }
228351644ab7SPaolo Bonzini 
228451644ab7SPaolo Bonzini         len -= l;
228551644ab7SPaolo Bonzini         addr += l;
228651644ab7SPaolo Bonzini     }
228751644ab7SPaolo Bonzini     return true;
228851644ab7SPaolo Bonzini }
228951644ab7SPaolo Bonzini 
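/*
 * Illustrative sketch: probing a range before committing to a write, so
 * a device can fail a command cleanly instead of writing part of it.
 * The check and the write are two separate walks of the address space,
 * so this is advisory rather than atomic.  The wrapper name is
 * hypothetical.
 */
static bool example_checked_write(AddressSpace *as, hwaddr addr,
                                  const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true)) {
        return false;               /* refuse: nothing written */
    }
    address_space_write(as, addr, buf, len);
    return true;
}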
22906d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
22916d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
22926d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
22936d16c2f8Saliguori  * Use only for reads OR writes, not for read-modify-write operations.
2294ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2295ba223c29Saliguori  * likely to succeed.
22966d16c2f8Saliguori  */
2297ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2298a8170e5eSAvi Kivity                         hwaddr addr,
2299a8170e5eSAvi Kivity                         hwaddr *plen,
2300ac1970fbSAvi Kivity                         bool is_write)
23016d16c2f8Saliguori {
2302a8170e5eSAvi Kivity     hwaddr len = *plen;
2303e3127ae0SPaolo Bonzini     hwaddr done = 0;
2304e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2305e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2306e3127ae0SPaolo Bonzini     ram_addr_t raddr;
23076d16c2f8Saliguori 
2308e3127ae0SPaolo Bonzini     if (len == 0) {
2309e3127ae0SPaolo Bonzini         return NULL;
2310e3127ae0SPaolo Bonzini     }
2311e3127ae0SPaolo Bonzini 
23126d16c2f8Saliguori     l = len;
23135c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
23145c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2315e3127ae0SPaolo Bonzini         if (bounce.buffer) {
2316e3127ae0SPaolo Bonzini             return NULL;
23176d16c2f8Saliguori         }
2318e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2319e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2320e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
23216d16c2f8Saliguori         bounce.addr = addr;
23226d16c2f8Saliguori         bounce.len = l;
2323d3e71559SPaolo Bonzini 
2324d3e71559SPaolo Bonzini         memory_region_ref(mr);
2325d3e71559SPaolo Bonzini         bounce.mr = mr;
23266d16c2f8Saliguori         if (!is_write) {
2327ac1970fbSAvi Kivity             address_space_read(as, addr, bounce.buffer, l);
23286d16c2f8Saliguori         }
232938bee5dcSStefano Stabellini 
233038bee5dcSStefano Stabellini         *plen = l;
233138bee5dcSStefano Stabellini         return bounce.buffer;
23326d16c2f8Saliguori     }
2333e3127ae0SPaolo Bonzini 
2334e3127ae0SPaolo Bonzini     base = xlat;
2335e3127ae0SPaolo Bonzini     raddr = memory_region_get_ram_addr(mr);
2336e3127ae0SPaolo Bonzini 
2337e3127ae0SPaolo Bonzini     for (;;) {
2338e3127ae0SPaolo Bonzini         len -= l;
2339e3127ae0SPaolo Bonzini         addr += l;
2340e3127ae0SPaolo Bonzini         done += l;
2341e3127ae0SPaolo Bonzini         if (len == 0) {
2342e3127ae0SPaolo Bonzini             break;
2343e3127ae0SPaolo Bonzini         }
2344e3127ae0SPaolo Bonzini 
2345e3127ae0SPaolo Bonzini         l = len;
2346e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2347e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2348149f54b5SPaolo Bonzini             break;
2349149f54b5SPaolo Bonzini         }
23508ab934f9SStefano Stabellini     }
23516d16c2f8Saliguori 
2352d3e71559SPaolo Bonzini     memory_region_ref(mr);
2353e3127ae0SPaolo Bonzini     *plen = done;
2354e3127ae0SPaolo Bonzini     return qemu_ram_ptr_length(raddr + base, plen);
23556d16c2f8Saliguori }
23566d16c2f8Saliguori 
2357ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
23586d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
23596d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
23606d16c2f8Saliguori  */
2361a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2362a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
23636d16c2f8Saliguori {
23646d16c2f8Saliguori     if (buffer != bounce.buffer) {
2365d3e71559SPaolo Bonzini         MemoryRegion *mr;
23667443b437SPaolo Bonzini         ram_addr_t addr1;
2367d3e71559SPaolo Bonzini 
2368d3e71559SPaolo Bonzini         mr = qemu_ram_addr_from_host(buffer, &addr1);
23691b5ec234SPaolo Bonzini         assert(mr != NULL);
2370d3e71559SPaolo Bonzini         if (is_write) {
23716886867eSPaolo Bonzini             invalidate_and_set_dirty(addr1, access_len);
23726d16c2f8Saliguori         }
2373868bb33fSJan Kiszka         if (xen_enabled()) {
2374e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2375050a0ddfSAnthony PERARD         }
2376d3e71559SPaolo Bonzini         memory_region_unref(mr);
23776d16c2f8Saliguori         return;
23786d16c2f8Saliguori     }
23796d16c2f8Saliguori     if (is_write) {
2380ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
23816d16c2f8Saliguori     }
2382f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
23836d16c2f8Saliguori     bounce.buffer = NULL;
2384d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2385ba223c29Saliguori     cpu_notify_map_clients();
23866d16c2f8Saliguori }
2387d0ecd2aaSbellard 
2388a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2389a8170e5eSAvi Kivity                               hwaddr *plen,
2390ac1970fbSAvi Kivity                               int is_write)
2391ac1970fbSAvi Kivity {
2392ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2393ac1970fbSAvi Kivity }
2394ac1970fbSAvi Kivity 
2395a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2396a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2397ac1970fbSAvi Kivity {
2398ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2399ac1970fbSAvi Kivity }
2400ac1970fbSAvi Kivity 
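/*
 * Illustrative sketch of the canonical map/unmap pattern.  *plen may
 * come back smaller than requested (bounce buffer, or a region
 * boundary), so real callers loop; this sketch handles a single chunk.
 * Assumes QEMU's usual headers for memcpy(); the function name is
 * hypothetical.
 */
static void example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr len)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, addr, &mapped, true);

    if (!host) {
        return;                      /* resources exhausted; retry later */
    }
    memcpy(host, data, mapped);      /* direct host access, no per-word I/O */
    /* access_len == mapped marks exactly the touched range dirty. */
    address_space_unmap(as, host, mapped, true, mapped);
}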
24018df1cd07Sbellard /* warning: addr must be aligned */
2402fdfba1a2SEdgar E. Iglesias static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
24031e78bcc1SAlexander Graf                                          enum device_endian endian)
24048df1cd07Sbellard {
24058df1cd07Sbellard     uint8_t *ptr;
2406791af8c8SPaolo Bonzini     uint64_t val;
24075c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2408149f54b5SPaolo Bonzini     hwaddr l = 4;
2409149f54b5SPaolo Bonzini     hwaddr addr1;
24108df1cd07Sbellard 
2411fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
24125c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
24138df1cd07Sbellard         /* I/O case */
24145c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 4);
24151e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
24161e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
24171e78bcc1SAlexander Graf             val = bswap32(val);
24181e78bcc1SAlexander Graf         }
24191e78bcc1SAlexander Graf #else
24201e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
24211e78bcc1SAlexander Graf             val = bswap32(val);
24221e78bcc1SAlexander Graf         }
24231e78bcc1SAlexander Graf #endif
24248df1cd07Sbellard     } else {
24258df1cd07Sbellard         /* RAM case */
24265c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
242706ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2428149f54b5SPaolo Bonzini                                + addr1);
24291e78bcc1SAlexander Graf         switch (endian) {
24301e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24311e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
24321e78bcc1SAlexander Graf             break;
24331e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24341e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
24351e78bcc1SAlexander Graf             break;
24361e78bcc1SAlexander Graf         default:
24378df1cd07Sbellard             val = ldl_p(ptr);
24381e78bcc1SAlexander Graf             break;
24391e78bcc1SAlexander Graf         }
24408df1cd07Sbellard     }
24418df1cd07Sbellard     return val;
24428df1cd07Sbellard }
24438df1cd07Sbellard 
2444fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
24451e78bcc1SAlexander Graf {
2446fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
24471e78bcc1SAlexander Graf }
24481e78bcc1SAlexander Graf 
2449fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
24501e78bcc1SAlexander Graf {
2451fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
24521e78bcc1SAlexander Graf }
24531e78bcc1SAlexander Graf 
2454fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
24551e78bcc1SAlexander Graf {
2456fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
24571e78bcc1SAlexander Graf }
24581e78bcc1SAlexander Graf 
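/*
 * Illustrative sketch: the three ldl_*_phys() flavours above differ only
 * in the byte-order conversion layered on the same access path.  The
 * register layout here is hypothetical.
 */
static void example_read_regs(AddressSpace *as, hwaddr regs)
{
    uint32_t le  = ldl_le_phys(as, regs);     /* always little-endian */
    uint32_t be  = ldl_be_phys(as, regs + 4); /* always big-endian */
    uint32_t nat = ldl_phys(as, regs + 8);    /* target's native order */

    (void)le; (void)be; (void)nat;
}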
245984b7b8e7Sbellard /* warning: addr must be aligned */
24602c17449bSEdgar E. Iglesias static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
24611e78bcc1SAlexander Graf                                          enum device_endian endian)
246284b7b8e7Sbellard {
246384b7b8e7Sbellard     uint8_t *ptr;
246484b7b8e7Sbellard     uint64_t val;
24655c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2466149f54b5SPaolo Bonzini     hwaddr l = 8;
2467149f54b5SPaolo Bonzini     hwaddr addr1;
246884b7b8e7Sbellard 
24692c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2470149f54b5SPaolo Bonzini                                  false);
24715c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
247284b7b8e7Sbellard         /* I/O case */
24735c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 8);
2474968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
2475968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
2476968a5627SPaolo Bonzini             val = bswap64(val);
2477968a5627SPaolo Bonzini         }
2478968a5627SPaolo Bonzini #else
2479968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
2480968a5627SPaolo Bonzini             val = bswap64(val);
2481968a5627SPaolo Bonzini         }
2482968a5627SPaolo Bonzini #endif
248384b7b8e7Sbellard     } else {
248484b7b8e7Sbellard         /* RAM case */
24855c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
248606ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2487149f54b5SPaolo Bonzini                                + addr1);
24881e78bcc1SAlexander Graf         switch (endian) {
24891e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24901e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
24911e78bcc1SAlexander Graf             break;
24921e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24931e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
24941e78bcc1SAlexander Graf             break;
24951e78bcc1SAlexander Graf         default:
249684b7b8e7Sbellard             val = ldq_p(ptr);
24971e78bcc1SAlexander Graf             break;
24981e78bcc1SAlexander Graf         }
249984b7b8e7Sbellard     }
250084b7b8e7Sbellard     return val;
250184b7b8e7Sbellard }
250284b7b8e7Sbellard 
25032c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
25041e78bcc1SAlexander Graf {
25052c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
25061e78bcc1SAlexander Graf }
25071e78bcc1SAlexander Graf 
25082c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
25091e78bcc1SAlexander Graf {
25102c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
25111e78bcc1SAlexander Graf }
25121e78bcc1SAlexander Graf 
25132c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
25141e78bcc1SAlexander Graf {
25152c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
25161e78bcc1SAlexander Graf }
25171e78bcc1SAlexander Graf 
2518aab33094Sbellard /* XXX: optimize */
25192c17449bSEdgar E. Iglesias uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2520aab33094Sbellard {
2521aab33094Sbellard     uint8_t val;
25222c17449bSEdgar E. Iglesias     address_space_rw(as, addr, &val, 1, 0);
2523aab33094Sbellard     return val;
2524aab33094Sbellard }
2525aab33094Sbellard 
2526733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
252741701aa4SEdgar E. Iglesias static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
25281e78bcc1SAlexander Graf                                           enum device_endian endian)
2529aab33094Sbellard {
2530733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2531733f0b02SMichael S. Tsirkin     uint64_t val;
25325c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2533149f54b5SPaolo Bonzini     hwaddr l = 2;
2534149f54b5SPaolo Bonzini     hwaddr addr1;
2535733f0b02SMichael S. Tsirkin 
253641701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2537149f54b5SPaolo Bonzini                                  false);
25385c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2539733f0b02SMichael S. Tsirkin         /* I/O case */
25405c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
25411e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
25421e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
25431e78bcc1SAlexander Graf             val = bswap16(val);
25441e78bcc1SAlexander Graf         }
25451e78bcc1SAlexander Graf #else
25461e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
25471e78bcc1SAlexander Graf             val = bswap16(val);
25481e78bcc1SAlexander Graf         }
25491e78bcc1SAlexander Graf #endif
2550733f0b02SMichael S. Tsirkin     } else {
2551733f0b02SMichael S. Tsirkin         /* RAM case */
25525c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
255306ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2554149f54b5SPaolo Bonzini                                + addr1);
25551e78bcc1SAlexander Graf         switch (endian) {
25561e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25571e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
25581e78bcc1SAlexander Graf             break;
25591e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25601e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
25611e78bcc1SAlexander Graf             break;
25621e78bcc1SAlexander Graf         default:
2563733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
25641e78bcc1SAlexander Graf             break;
25651e78bcc1SAlexander Graf         }
2566733f0b02SMichael S. Tsirkin     }
2567733f0b02SMichael S. Tsirkin     return val;
2568aab33094Sbellard }
2569aab33094Sbellard 
257041701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
25711e78bcc1SAlexander Graf {
257241701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
25731e78bcc1SAlexander Graf }
25741e78bcc1SAlexander Graf 
257541701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
25761e78bcc1SAlexander Graf {
257741701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
25781e78bcc1SAlexander Graf }
25791e78bcc1SAlexander Graf 
258041701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
25811e78bcc1SAlexander Graf {
258241701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
25831e78bcc1SAlexander Graf }
25841e78bcc1SAlexander Graf 
25858df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
25868df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
25878df1cd07Sbellard    bits are used to track modified PTEs */
25882198a121SEdgar E. Iglesias void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
25898df1cd07Sbellard {
25908df1cd07Sbellard     uint8_t *ptr;
25915c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2592149f54b5SPaolo Bonzini     hwaddr l = 4;
2593149f54b5SPaolo Bonzini     hwaddr addr1;
25948df1cd07Sbellard 
25952198a121SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2596149f54b5SPaolo Bonzini                                  true);
25975c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
25985c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
25998df1cd07Sbellard     } else {
26005c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
26015579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
26028df1cd07Sbellard         stl_p(ptr, val);
260374576198Saliguori 
260474576198Saliguori         if (unlikely(in_migration)) {
2605a2cd8c85SJuan Quintela             if (cpu_physical_memory_is_clean(addr1)) {
260674576198Saliguori                 /* invalidate code */
260774576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
260874576198Saliguori                 /* set dirty bit */
26096886867eSPaolo Bonzini                 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
261074576198Saliguori             }
261174576198Saliguori         }
26128df1cd07Sbellard     }
26138df1cd07Sbellard }
26148df1cd07Sbellard 
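/*
 * Illustrative sketch of why stl_phys_notdirty() exists: a softmmu
 * helper updating accessed/dirty bits in a guest PTE must not have the
 * store treated as self-modifying code.  The PTE layout and bit value
 * are hypothetical.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= 0x20;                         /* hypothetical "accessed" bit */
    /* Skips dirty marking and TB invalidation, except under migration
     * (handled inside the function above). */
    stl_phys_notdirty(as, pte_addr, pte);
}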
26158df1cd07Sbellard /* warning: addr must be aligned */
2616ab1da857SEdgar E. Iglesias static inline void stl_phys_internal(AddressSpace *as,
2617ab1da857SEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
26181e78bcc1SAlexander Graf                                      enum device_endian endian)
26198df1cd07Sbellard {
26208df1cd07Sbellard     uint8_t *ptr;
26215c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2622149f54b5SPaolo Bonzini     hwaddr l = 4;
2623149f54b5SPaolo Bonzini     hwaddr addr1;
26248df1cd07Sbellard 
2625ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2626149f54b5SPaolo Bonzini                                  true);
26275c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
26281e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26291e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26301e78bcc1SAlexander Graf             val = bswap32(val);
26311e78bcc1SAlexander Graf         }
26321e78bcc1SAlexander Graf #else
26331e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26341e78bcc1SAlexander Graf             val = bswap32(val);
26351e78bcc1SAlexander Graf         }
26361e78bcc1SAlexander Graf #endif
26375c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
26388df1cd07Sbellard     } else {
26398df1cd07Sbellard         /* RAM case */
26405c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
26415579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
26421e78bcc1SAlexander Graf         switch (endian) {
26431e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26441e78bcc1SAlexander Graf             stl_le_p(ptr, val);
26451e78bcc1SAlexander Graf             break;
26461e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26471e78bcc1SAlexander Graf             stl_be_p(ptr, val);
26481e78bcc1SAlexander Graf             break;
26491e78bcc1SAlexander Graf         default:
26508df1cd07Sbellard             stl_p(ptr, val);
26511e78bcc1SAlexander Graf             break;
26521e78bcc1SAlexander Graf         }
265351d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
26548df1cd07Sbellard     }
26553a7d929eSbellard }
26568df1cd07Sbellard 
2657ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
26581e78bcc1SAlexander Graf {
2659ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
26601e78bcc1SAlexander Graf }
26611e78bcc1SAlexander Graf 
2662ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
26631e78bcc1SAlexander Graf {
2664ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
26651e78bcc1SAlexander Graf }
26661e78bcc1SAlexander Graf 
2667ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
26681e78bcc1SAlexander Graf {
2669ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
26701e78bcc1SAlexander Graf }
26711e78bcc1SAlexander Graf 
2672aab33094Sbellard /* XXX: optimize */
2673db3be60dSEdgar E. Iglesias void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2674aab33094Sbellard {
2675aab33094Sbellard     uint8_t v = val;
2676db3be60dSEdgar E. Iglesias     address_space_rw(as, addr, &v, 1, 1);
2677aab33094Sbellard }
2678aab33094Sbellard 
2679733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
26805ce5944dSEdgar E. Iglesias static inline void stw_phys_internal(AddressSpace *as,
26815ce5944dSEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
26821e78bcc1SAlexander Graf                                      enum device_endian endian)
2683aab33094Sbellard {
2684733f0b02SMichael S. Tsirkin     uint8_t *ptr;
26855c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2686149f54b5SPaolo Bonzini     hwaddr l = 2;
2687149f54b5SPaolo Bonzini     hwaddr addr1;
2688733f0b02SMichael S. Tsirkin 
26895ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
26905c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
26911e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26921e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26931e78bcc1SAlexander Graf             val = bswap16(val);
26941e78bcc1SAlexander Graf         }
26951e78bcc1SAlexander Graf #else
26961e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26971e78bcc1SAlexander Graf             val = bswap16(val);
26981e78bcc1SAlexander Graf         }
26991e78bcc1SAlexander Graf #endif
27005c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2701733f0b02SMichael S. Tsirkin     } else {
2702733f0b02SMichael S. Tsirkin         /* RAM case */
27035c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2704733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
27051e78bcc1SAlexander Graf         switch (endian) {
27061e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
27071e78bcc1SAlexander Graf             stw_le_p(ptr, val);
27081e78bcc1SAlexander Graf             break;
27091e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
27101e78bcc1SAlexander Graf             stw_be_p(ptr, val);
27111e78bcc1SAlexander Graf             break;
27121e78bcc1SAlexander Graf         default:
2713733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
27141e78bcc1SAlexander Graf             break;
27151e78bcc1SAlexander Graf         }
271651d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2717733f0b02SMichael S. Tsirkin     }
2718aab33094Sbellard }
2719aab33094Sbellard 
27205ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27211e78bcc1SAlexander Graf {
27225ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
27231e78bcc1SAlexander Graf }
27241e78bcc1SAlexander Graf 
27255ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27261e78bcc1SAlexander Graf {
27275ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
27281e78bcc1SAlexander Graf }
27291e78bcc1SAlexander Graf 
27305ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27311e78bcc1SAlexander Graf {
27325ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
27331e78bcc1SAlexander Graf }
27341e78bcc1SAlexander Graf 
2735aab33094Sbellard /* XXX: optimize */
2736f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2737aab33094Sbellard {
2738aab33094Sbellard     val = tswap64(val);
2739f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
2740aab33094Sbellard }
2741aab33094Sbellard 
2742f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
27431e78bcc1SAlexander Graf {
27441e78bcc1SAlexander Graf     val = cpu_to_le64(val);
2745f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
27461e78bcc1SAlexander Graf }
27471e78bcc1SAlexander Graf 
2748f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
27491e78bcc1SAlexander Graf {
27501e78bcc1SAlexander Graf     val = cpu_to_be64(val);
2751f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
27521e78bcc1SAlexander Graf }
27531e78bcc1SAlexander Graf 
27545e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
2755f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2756b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
275713eb76e0Sbellard {
275813eb76e0Sbellard     int l;
2759a8170e5eSAvi Kivity     hwaddr phys_addr;
27609b3c35e0Sj_mayer     target_ulong page;
276113eb76e0Sbellard 
276213eb76e0Sbellard     while (len > 0) {
276313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
2764f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
276513eb76e0Sbellard         /* if no physical page mapped, return an error */
276613eb76e0Sbellard         if (phys_addr == -1)
276713eb76e0Sbellard             return -1;
276813eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
276913eb76e0Sbellard         if (l > len)
277013eb76e0Sbellard             l = len;
27715e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
27722e38847bSEdgar E. Iglesias         if (is_write) {
27732e38847bSEdgar E. Iglesias             cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
27742e38847bSEdgar E. Iglesias         } else {
27752e38847bSEdgar E. Iglesias             address_space_rw(cpu->as, phys_addr, buf, l, 0);
27762e38847bSEdgar E. Iglesias         }
277713eb76e0Sbellard         len -= l;
277813eb76e0Sbellard         buf += l;
277913eb76e0Sbellard         addr += l;
278013eb76e0Sbellard     }
278113eb76e0Sbellard     return 0;
278213eb76e0Sbellard }
2783a68fe89cSPaul Brook #endif
278413eb76e0Sbellard 
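/*
 * Illustrative sketch: cpu_memory_rw_debug() is the entry point a
 * debugger stub would use, since it translates guest-virtual addresses
 * via cpu_get_phys_page_debug() and can write through ROM.  The wrapper
 * name is hypothetical.
 */
static int example_peek_guest(CPUState *cpu, target_ulong vaddr,
                              uint8_t *out, int len)
{
    /* Returns -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0);
}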
27858e4a424bSBlue Swirl /*
27868e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find out if
27878e4a424bSBlue Swirl  * it's running on a big-endian machine. Don't do this at home, kids!
27888e4a424bSBlue Swirl  */
278998ed8ecfSGreg Kurz bool target_words_bigendian(void);
279098ed8ecfSGreg Kurz bool target_words_bigendian(void)
27918e4a424bSBlue Swirl {
27928e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
27938e4a424bSBlue Swirl     return true;
27948e4a424bSBlue Swirl #else
27958e4a424bSBlue Swirl     return false;
27968e4a424bSBlue Swirl #endif
27978e4a424bSBlue Swirl }
27988e4a424bSBlue Swirl 
279976f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2800a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
280176f35538SWen Congyang {
28025c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2803149f54b5SPaolo Bonzini     hwaddr l = 1;
280476f35538SWen Congyang 
28055c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2806149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
280776f35538SWen Congyang 
28085c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
28095c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
281076f35538SWen Congyang }
2811bd2fa51fSMichael R. Hines 
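/*
 * Illustrative sketch: a caller (e.g. memory-dump code) using the
 * predicate above to skip anything that must go through device
 * callbacks rather than memcpy().  The wrapper name is hypothetical.
 */
static bool example_is_copyable(hwaddr pa)
{
    return !cpu_physical_memory_is_io(pa);
}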
2812bd2fa51fSMichael R. Hines void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2813bd2fa51fSMichael R. Hines {
2814bd2fa51fSMichael R. Hines     RAMBlock *block;
2815bd2fa51fSMichael R. Hines 
2816bd2fa51fSMichael R. Hines     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2817bd2fa51fSMichael R. Hines         func(block->host, block->offset, block->length, opaque);
2818bd2fa51fSMichael R. Hines     }
2819bd2fa51fSMichael R. Hines }
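/*
 * Illustrative sketch: a RAMBlockIterFunc matching the call made above,
 * which passes (host pointer, guest ram offset, length, opaque).  This
 * one just totals the registered RAM; the name is hypothetical.
 *
 * Usage:  ram_addr_t total = 0;
 *         qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}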
2820ec3f8c99SPeter Maydell #endif
2821