xref: /qemu/system/physmem.c (revision 651a5bc03705102de519ebf079a40ecc1da991db)
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to the next node (in units of P_L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

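/*
 * Worked example (editor's illustration, not part of the original file):
 * with the common TARGET_PAGE_BITS of 12, P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 = 6, so the 52 page-number bits are consumed
 * as six 9-bit radix-tree indices, top level first.  A hypothetical
 * helper extracting the slot index used at a given level would be:
 */
static inline unsigned phys_page_level_index_example(hwaddr addr, int level)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;    /* page number */

    /* same expression as phys_page_set_level()/phys_page_find() below */
    return (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}
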
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

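/* Usage sketch (editor's illustration, not part of the original file):
 * registering section number `leaf` for a 2 MiB region starting at 1 GiB
 * amounts to
 *
 *     phys_page_set(d, 0x40000000 >> TARGET_PAGE_BITS,    // first page
 *                   0x200000 >> TARGET_PAGE_BITS,         // page count
 *                   leaf);
 *
 * phys_page_set_level() then stamps `leaf` into whole aligned runs of
 * `step` pages at the highest level possible and recurses only for the
 * unaligned edges.
 */
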
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

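/* Illustrative before/after (editor's note, not part of the original file):
 * suppose the walk from an entry passes through two interior nodes that
 * each contain a single valid child before reaching the bottom node:
 *
 *     entry(skip=1) -> A(skip=1) -> B(skip=1) -> C (leaf entries)
 *
 * The bottom-up merges rewrite this as entry(skip=3) -> C, so
 * phys_page_find() below reaches C in one step instead of three.
 */
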
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

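/* Walk sketch (editor's note, not part of the original file): without
 * compaction every interior entry has skip == 1, so the loop above uses
 * levels i = 5, 4, ..., 0 in turn (with the usual P_L2_LEVELS of 6) and
 * stops once it loads a leaf entry whose skip is 0.  A compacted entry
 * with skip == 3 drops i by 3 at once, so the two intermediate nodes are
 * never read.
 */
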
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

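/* Usage sketch (editor's illustration, not part of the original file):
 * callers resolve a guest physical address inside an RCU critical
 * section and bound the access by the returned *plen, roughly:
 *
 *     rcu_read_lock();
 *     len = size_of_access;
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         // RAM: access the region's host pointer at offset xlat
 *     } else {
 *         // MMIO: dispatch through the MemoryRegion's read/write ops
 *     }
 *     rcu_read_unlock();
 *
 * The loop above follows iotlb.target_as across nested IOMMUs until a
 * terminal MemoryRegion is found or a permission check fails.
 */
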
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

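/* Usage sketch (editor's illustration, not part of the original file):
 * a target with secure and non-secure physical address spaces would
 * register both before the CPU is first used, e.g.
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, as_nonsecure, 0);
 *     cpu_address_space_init(cpu, as_secure, 1);
 *
 * Index 0 doubles as the legacy cpu->as convenience alias, and KVM
 * configurations are restricted to that single address space.
 */
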
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

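/* Worked example (editor's note, not part of the original file): a
 * watchpoint with vaddr 0xfffffffffffffffc and len 4 has
 * wpend == UINT64_MAX; naively computing wp->vaddr + wp->len would wrap
 * to 0 and make the watchpoint unmatchable.  With inclusive ends, a
 * 4-byte access at the same address gives addrend == UINT64_MAX and
 *
 *     !(addr > wpend || wp->vaddr > addrend)  ->  true
 */
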
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

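/* Caller sketch (editor's illustration, not part of the original file):
 * the returned block is only guaranteed to stay alive inside the RCU
 * critical section, since hot-unplug reclaims blocks via call_rcu():
 *
 *     rcu_read_lock();
 *     block = qemu_get_ram_block(addr);
 *     host = ramblock_ptr(block, addr - block->offset);
 *     ... use host ...
 *     rcu_read_unlock();
 */
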
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

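/* Usage sketch (editor's illustration, not part of the original file):
 * RAM migration drains dirty state one client at a time, e.g.
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(
 *             page_addr, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION)) {
 *         ... queue this page for (re)transmission ...
 *     }
 *
 * When pages really were dirty and TCG is in use, the TLB reset above
 * re-arms write detection so that subsequent guest writes are recorded
 * again.
 */
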
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
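
/* Encoding example (editor's note, not part of the original file): for
 * RAM the returned value is a ram_addr with a section number ORed into
 * the low bits, e.g. a writable page backed at ram_addr 0x1234000 yields
 * 0x1234000 | PHYS_SECTION_NOTDIRTY.  For MMIO it is the index of the
 * section within d->map.sections plus the in-page offset, which the TLB
 * code later splits apart again to find the MemoryRegion.
 */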
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

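/* Usage sketch (editor's note): to the best of my knowledge the in-tree
 * user at this point is s390x KVM, which installs its own allocator
 * early on, roughly:
 *
 *     static void *legacy_s390_alloc(size_t size, uint64_t *align)
 *     {
 *         ... mmap() the region with the placement the hypervisor
 *         requires, reporting the resulting alignment via *align ...
 *     }
 *
 *     phys_mem_set_alloc(legacy_s390_alloc);
 */
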
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
113433417e70Sbellard }
113533417e70Sbellard 
1136ac1970fbSAvi Kivity static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
11370f0cb164SAvi Kivity {
113889ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
113900752703SPaolo Bonzini     AddressSpaceDispatch *d = as->next_dispatch;
114099b9cc06SPaolo Bonzini     MemoryRegionSection now = *section, remain = *section;
1141052e87b0SPaolo Bonzini     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
11420f0cb164SAvi Kivity 
1143733d5ef5SPaolo Bonzini     if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1144733d5ef5SPaolo Bonzini         uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1145733d5ef5SPaolo Bonzini                        - now.offset_within_address_space;
1146733d5ef5SPaolo Bonzini 
1147052e87b0SPaolo Bonzini         now.size = int128_min(int128_make64(left), now.size);
1148ac1970fbSAvi Kivity         register_subpage(d, &now);
1149733d5ef5SPaolo Bonzini     } else {
1150052e87b0SPaolo Bonzini         now.size = int128_zero();
1151733d5ef5SPaolo Bonzini     }
1152052e87b0SPaolo Bonzini     while (int128_ne(remain.size, now.size)) {
1153052e87b0SPaolo Bonzini         remain.size = int128_sub(remain.size, now.size);
1154052e87b0SPaolo Bonzini         remain.offset_within_address_space += int128_get64(now.size);
1155052e87b0SPaolo Bonzini         remain.offset_within_region += int128_get64(now.size);
11560f0cb164SAvi Kivity         now = remain;
1157052e87b0SPaolo Bonzini         if (int128_lt(remain.size, page_size)) {
1158733d5ef5SPaolo Bonzini             register_subpage(d, &now);
115988266249SHu Tao         } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1160052e87b0SPaolo Bonzini             now.size = page_size;
1161ac1970fbSAvi Kivity             register_subpage(d, &now);
116269b67646STyler Hall         } else {
1163052e87b0SPaolo Bonzini             now.size = int128_and(now.size, int128_neg(page_size));
1164ac1970fbSAvi Kivity             register_multipage(d, &now);
116569b67646STyler Hall         }
11660f0cb164SAvi Kivity     }
11670f0cb164SAvi Kivity }
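/*
 * Worked example for the splitting loop above, assuming 4 KiB target
 * pages: a section covering [0x1800, 0x4800) is registered as a head
 * subpage [0x1800, 0x2000), a multipage run [0x2000, 0x4000), and a
 * tail subpage [0x4000, 0x4800).  A section that is already page
 * aligned and a whole number of pages goes straight to
 * register_multipage().
 */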
11680f0cb164SAvi Kivity 
116962a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
117062a2744cSSheng Yang {
117162a2744cSSheng Yang     if (kvm_enabled())
117262a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
117362a2744cSSheng Yang }
117462a2744cSSheng Yang 
1175b2a8658eSUmesh Deshpande void qemu_mutex_lock_ramlist(void)
1176b2a8658eSUmesh Deshpande {
1177b2a8658eSUmesh Deshpande     qemu_mutex_lock(&ram_list.mutex);
1178b2a8658eSUmesh Deshpande }
1179b2a8658eSUmesh Deshpande 
1180b2a8658eSUmesh Deshpande void qemu_mutex_unlock_ramlist(void)
1181b2a8658eSUmesh Deshpande {
1182b2a8658eSUmesh Deshpande     qemu_mutex_unlock(&ram_list.mutex);
1183b2a8658eSUmesh Deshpande }
1184b2a8658eSUmesh Deshpande 
1185e1e84ba0SMarkus Armbruster #ifdef __linux__
1186c902760fSMarcelo Tosatti 
1187c902760fSMarcelo Tosatti #include <sys/vfs.h>
1188c902760fSMarcelo Tosatti 
1189c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
1190c902760fSMarcelo Tosatti 
1191fc7a5800SHu Tao static long gethugepagesize(const char *path, Error **errp)
1192c902760fSMarcelo Tosatti {
1193c902760fSMarcelo Tosatti     struct statfs fs;
1194c902760fSMarcelo Tosatti     int ret;
1195c902760fSMarcelo Tosatti 
1196c902760fSMarcelo Tosatti     do {
1197c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
1198c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
1199c902760fSMarcelo Tosatti 
1200c902760fSMarcelo Tosatti     if (ret != 0) {
1201fc7a5800SHu Tao         error_setg_errno(errp, errno, "failed to get page size of file %s",
1202fc7a5800SHu Tao                          path);
1203c902760fSMarcelo Tosatti         return 0;
1204c902760fSMarcelo Tosatti     }
1205c902760fSMarcelo Tosatti 
1206c902760fSMarcelo Tosatti     return fs.f_bsize;
1207c902760fSMarcelo Tosatti }
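/*
 * Added note: on a hugetlbfs mount, f_bsize is the mount's huge page
 * size (commonly 2 MiB or 1 GiB on x86_64); on other filesystems it
 * is just the fs block size, typically 4 KiB, so -mem-path pointed at
 * e.g. tmpfs still works, only without huge pages.
 */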
1208c902760fSMarcelo Tosatti 
120904b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
121004b16653SAlex Williamson                             ram_addr_t memory,
12117f56e740SPaolo Bonzini                             const char *path,
12127f56e740SPaolo Bonzini                             Error **errp)
1213c902760fSMarcelo Tosatti {
12148d31d6b6SPavel Fedin     struct stat st;
1215c902760fSMarcelo Tosatti     char *filename;
12168ca761f6SPeter Feiner     char *sanitized_name;
12178ca761f6SPeter Feiner     char *c;
1218794e8f30SMichael S. Tsirkin     void *area;
1219c902760fSMarcelo Tosatti     int fd;
1220557529ddSHu Tao     uint64_t hpagesize;
1221fc7a5800SHu Tao     Error *local_err = NULL;
1222c902760fSMarcelo Tosatti 
1223fc7a5800SHu Tao     hpagesize = gethugepagesize(path, &local_err);
1224fc7a5800SHu Tao     if (local_err) {
1225fc7a5800SHu Tao         error_propagate(errp, local_err);
1226f9a49dfaSMarcelo Tosatti         goto error;
1227c902760fSMarcelo Tosatti     }
1228a2b257d6SIgor Mammedov     block->mr->align = hpagesize;
1229c902760fSMarcelo Tosatti 
1230c902760fSMarcelo Tosatti     if (memory < hpagesize) {
1231557529ddSHu Tao         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1232557529ddSHu Tao                    "or larger than huge page size 0x%" PRIx64,
1233557529ddSHu Tao                    memory, hpagesize);
1234557529ddSHu Tao         goto error;
1235c902760fSMarcelo Tosatti     }
1236c902760fSMarcelo Tosatti 
1237c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
12387f56e740SPaolo Bonzini         error_setg(errp,
12397f56e740SPaolo Bonzini                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1240f9a49dfaSMarcelo Tosatti         goto error;
1241c902760fSMarcelo Tosatti     }
1242c902760fSMarcelo Tosatti 
12438d31d6b6SPavel Fedin     if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
12448ca761f6SPeter Feiner         /* Make name safe to use with mkstemp by replacing '/' with '_'. */
124583234bf2SPeter Crosthwaite         sanitized_name = g_strdup(memory_region_name(block->mr));
12468ca761f6SPeter Feiner         for (c = sanitized_name; *c != '\0'; c++) {
12478d31d6b6SPavel Fedin             if (*c == '/') {
12488ca761f6SPeter Feiner                 *c = '_';
12498ca761f6SPeter Feiner             }
12508d31d6b6SPavel Fedin         }
12518ca761f6SPeter Feiner 
12528ca761f6SPeter Feiner         filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
12538ca761f6SPeter Feiner                                    sanitized_name);
12548ca761f6SPeter Feiner         g_free(sanitized_name);
1255c902760fSMarcelo Tosatti 
1256c902760fSMarcelo Tosatti         fd = mkstemp(filename);
12578d31d6b6SPavel Fedin         if (fd >= 0) {
12588d31d6b6SPavel Fedin             unlink(filename);
12598d31d6b6SPavel Fedin         }
12608d31d6b6SPavel Fedin         g_free(filename);
12618d31d6b6SPavel Fedin     } else {
12628d31d6b6SPavel Fedin         fd = open(path, O_RDWR | O_CREAT, 0644);
12638d31d6b6SPavel Fedin     }
12648d31d6b6SPavel Fedin 
1265c902760fSMarcelo Tosatti     if (fd < 0) {
12667f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
12677f56e740SPaolo Bonzini                          "unable to create backing store for hugepages");
1268f9a49dfaSMarcelo Tosatti         goto error;
1269c902760fSMarcelo Tosatti     }
1270c902760fSMarcelo Tosatti 
12719284f319SChen Hanxiao     memory = ROUND_UP(memory, hpagesize);
1272c902760fSMarcelo Tosatti 
1273c902760fSMarcelo Tosatti     /*
1274c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
1275c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1276c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1277c902760fSMarcelo Tosatti      * mmap will fail.
1278c902760fSMarcelo Tosatti      */
12797f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1280c902760fSMarcelo Tosatti         perror("ftruncate");
12817f56e740SPaolo Bonzini     }
1282c902760fSMarcelo Tosatti 
1283794e8f30SMichael S. Tsirkin     area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
1284c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
12857f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
12867f56e740SPaolo Bonzini                          "unable to map backing store for hugepages");
1287c902760fSMarcelo Tosatti         close(fd);
1288f9a49dfaSMarcelo Tosatti         goto error;
1289c902760fSMarcelo Tosatti     }
1290ef36fa14SMarcelo Tosatti 
1291ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
129238183310SPaolo Bonzini         os_mem_prealloc(fd, area, memory);
1293ef36fa14SMarcelo Tosatti     }
1294ef36fa14SMarcelo Tosatti 
129504b16653SAlex Williamson     block->fd = fd;
1296c902760fSMarcelo Tosatti     return area;
1297f9a49dfaSMarcelo Tosatti 
1298f9a49dfaSMarcelo Tosatti error:
1299f9a49dfaSMarcelo Tosatti     return NULL;
1300c902760fSMarcelo Tosatti }
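/*
 * Added note on the two branches above: when "path" is a directory
 * (the usual -mem-path case) a qemu_back_mem.<region>.XXXXXX file is
 * created in it and unlink()ed right away, so the backing store is
 * reclaimed as soon as QEMU exits or the fd is closed; when "path"
 * names a regular file it is opened in place, which keeps the mapping
 * visible to other processes, e.g. for vhost-user style sharing in
 * combination with RAM_SHARED.
 */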
1301c902760fSMarcelo Tosatti #endif
1302c902760fSMarcelo Tosatti 
13030dc3f44aSMike Day /* Called with the ramlist lock held.  */
1304d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1305d17b5288SAlex Williamson {
130604b16653SAlex Williamson     RAMBlock *block, *next_block;
13073e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
130804b16653SAlex Williamson 
130949cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
131049cd9ac6SStefan Hajnoczi 
13110dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
131204b16653SAlex Williamson         return 0;
13130d53d9feSMike Day     }
131404b16653SAlex Williamson 
13150dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1316f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
131704b16653SAlex Williamson 
131862be4e3aSMichael S. Tsirkin         end = block->offset + block->max_length;
131904b16653SAlex Williamson 
13200dc3f44aSMike Day         QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
132104b16653SAlex Williamson             if (next_block->offset >= end) {
132204b16653SAlex Williamson                 next = MIN(next, next_block->offset);
132304b16653SAlex Williamson             }
132404b16653SAlex Williamson         }
132504b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
132604b16653SAlex Williamson             offset = end;
132704b16653SAlex Williamson             mingap = next - end;
132804b16653SAlex Williamson         }
132904b16653SAlex Williamson     }
13303e837b2cSAlex Williamson 
13313e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
13323e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
13333e837b2cSAlex Williamson                 (uint64_t)size);
13343e837b2cSAlex Williamson         abort();
13353e837b2cSAlex Williamson     }
13363e837b2cSAlex Williamson 
133704b16653SAlex Williamson     return offset;
133804b16653SAlex Williamson }
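/*
 * Worked example, assuming existing blocks at [0x0, 0x100000) and
 * [0x300000, 0x400000): a request for 0x100000 bytes sees the gap
 * [0x100000, 0x300000) and the open-ended one after 0x400000; both
 * fit, but the first is the smallest fitting gap, so 0x100000 is
 * returned.  This best-fit policy keeps the ram_addr_t space compact.
 */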
133904b16653SAlex Williamson 
1340652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
134104b16653SAlex Williamson {
1342d17b5288SAlex Williamson     RAMBlock *block;
1343d17b5288SAlex Williamson     ram_addr_t last = 0;
1344d17b5288SAlex Williamson 
13450dc3f44aSMike Day     rcu_read_lock();
13460dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
134762be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
13480d53d9feSMike Day     }
13490dc3f44aSMike Day     rcu_read_unlock();
1350d17b5288SAlex Williamson     return last;
1351d17b5288SAlex Williamson }
1352d17b5288SAlex Williamson 
1353ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1354ddb97f1dSJason Baron {
1355ddb97f1dSJason Baron     int ret;
1356ddb97f1dSJason Baron 
1357ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
135847c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1359ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1360ddb97f1dSJason Baron         if (ret) {
1361ddb97f1dSJason Baron             perror("qemu_madvise");
1362ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1363ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1364ddb97f1dSJason Baron         }
1365ddb97f1dSJason Baron     }
1366ddb97f1dSJason Baron }
1367ddb97f1dSJason Baron 
13680dc3f44aSMike Day /* Called within an RCU critical section, or while the ramlist lock
13690dc3f44aSMike Day  * is held.
13700dc3f44aSMike Day  */
137120cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
137284b89d78SCam Macdonell {
137320cfe881SHu Tao     RAMBlock *block;
137484b89d78SCam Macdonell 
13750dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1376c5705a77SAvi Kivity         if (block->offset == addr) {
137720cfe881SHu Tao             return block;
1378c5705a77SAvi Kivity         }
1379c5705a77SAvi Kivity     }
138020cfe881SHu Tao 
138120cfe881SHu Tao     return NULL;
138220cfe881SHu Tao }
138320cfe881SHu Tao 
1384422148d3SDr. David Alan Gilbert const char *qemu_ram_get_idstr(RAMBlock *rb)
1385422148d3SDr. David Alan Gilbert {
1386422148d3SDr. David Alan Gilbert     return rb->idstr;
1387422148d3SDr. David Alan Gilbert }
1388422148d3SDr. David Alan Gilbert 
1389ae3a7047SMike Day /* Called with iothread lock held.  */
139020cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
139120cfe881SHu Tao {
1392ae3a7047SMike Day     RAMBlock *new_block, *block;
139320cfe881SHu Tao 
13940dc3f44aSMike Day     rcu_read_lock();
1395ae3a7047SMike Day     new_block = find_ram_block(addr);
1396c5705a77SAvi Kivity     assert(new_block);
1397c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
139884b89d78SCam Macdonell 
139909e5ab63SAnthony Liguori     if (dev) {
140009e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
140184b89d78SCam Macdonell         if (id) {
140284b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
14037267c094SAnthony Liguori             g_free(id);
140484b89d78SCam Macdonell         }
140584b89d78SCam Macdonell     }
140684b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
140784b89d78SCam Macdonell 
14080dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1409c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
141084b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
141184b89d78SCam Macdonell                     new_block->idstr);
141284b89d78SCam Macdonell             abort();
141384b89d78SCam Macdonell         }
141484b89d78SCam Macdonell     }
14150dc3f44aSMike Day     rcu_read_unlock();
1416c5705a77SAvi Kivity }
1417c5705a77SAvi Kivity 
1418ae3a7047SMike Day /* Called with iothread lock held.  */
141920cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
142020cfe881SHu Tao {
1421ae3a7047SMike Day     RAMBlock *block;
142220cfe881SHu Tao 
1423ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1424ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1425ae3a7047SMike Day      * does not work anyway.
1426ae3a7047SMike Day      */
1427ae3a7047SMike Day 
14280dc3f44aSMike Day     rcu_read_lock();
1429ae3a7047SMike Day     block = find_ram_block(addr);
143020cfe881SHu Tao     if (block) {
143120cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
143220cfe881SHu Tao     }
14330dc3f44aSMike Day     rcu_read_unlock();
143420cfe881SHu Tao }
143520cfe881SHu Tao 
14368490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
14378490fc78SLuiz Capitulino {
143875cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
14398490fc78SLuiz Capitulino         /* disabled by the user */
14408490fc78SLuiz Capitulino         return 0;
14418490fc78SLuiz Capitulino     }
14428490fc78SLuiz Capitulino 
14438490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
14448490fc78SLuiz Capitulino }
14458490fc78SLuiz Capitulino 
144662be4e3aSMichael S. Tsirkin /* Only legal before guest might have detected the memory size: e.g. on
144762be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
144862be4e3aSMichael S. Tsirkin  *
144962be4e3aSMichael S. Tsirkin  * As memory core doesn't know how is memory accessed, it is up to
145062be4e3aSMichael S. Tsirkin  * resize callback to update device state and/or add assertions to detect
145162be4e3aSMichael S. Tsirkin  * misuse, if necessary.
145262be4e3aSMichael S. Tsirkin  */
145362be4e3aSMichael S. Tsirkin int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
145462be4e3aSMichael S. Tsirkin {
145562be4e3aSMichael S. Tsirkin     RAMBlock *block = find_ram_block(base);
145662be4e3aSMichael S. Tsirkin 
145762be4e3aSMichael S. Tsirkin     assert(block);
145862be4e3aSMichael S. Tsirkin 
14594ed023ceSDr. David Alan Gilbert     newsize = HOST_PAGE_ALIGN(newsize);
1460129ddaf3SMichael S. Tsirkin 
146162be4e3aSMichael S. Tsirkin     if (block->used_length == newsize) {
146262be4e3aSMichael S. Tsirkin         return 0;
146362be4e3aSMichael S. Tsirkin     }
146462be4e3aSMichael S. Tsirkin 
146562be4e3aSMichael S. Tsirkin     if (!(block->flags & RAM_RESIZEABLE)) {
146662be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
146762be4e3aSMichael S. Tsirkin                          "Length mismatch: %s: 0x" RAM_ADDR_FMT
146862be4e3aSMichael S. Tsirkin                          " in != 0x" RAM_ADDR_FMT, block->idstr,
146962be4e3aSMichael S. Tsirkin                          newsize, block->used_length);
147062be4e3aSMichael S. Tsirkin         return -EINVAL;
147162be4e3aSMichael S. Tsirkin     }
147262be4e3aSMichael S. Tsirkin 
147362be4e3aSMichael S. Tsirkin     if (block->max_length < newsize) {
147462be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
147562be4e3aSMichael S. Tsirkin                          "Length too large: %s: 0x" RAM_ADDR_FMT
147662be4e3aSMichael S. Tsirkin                          " > 0x" RAM_ADDR_FMT, block->idstr,
147762be4e3aSMichael S. Tsirkin                          newsize, block->max_length);
147862be4e3aSMichael S. Tsirkin         return -EINVAL;
147962be4e3aSMichael S. Tsirkin     }
148062be4e3aSMichael S. Tsirkin 
148162be4e3aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
148262be4e3aSMichael S. Tsirkin     block->used_length = newsize;
148358d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
148458d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
148562be4e3aSMichael S. Tsirkin     memory_region_set_size(block->mr, newsize);
148662be4e3aSMichael S. Tsirkin     if (block->resized) {
148762be4e3aSMichael S. Tsirkin         block->resized(block->idstr, newsize, block->host);
148862be4e3aSMichael S. Tsirkin     }
148962be4e3aSMichael S. Tsirkin     return 0;
149062be4e3aSMichael S. Tsirkin }
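/*
 * Usage sketch, with hypothetical caller state (block_base,
 * new_used_size):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_base, new_used_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * newsize is rounded up to the host page size; growing beyond the
 * max_length fixed at allocation time fails with -EINVAL.
 */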
149162be4e3aSMichael S. Tsirkin 
1492ef701d7bSHu Tao static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1493c5705a77SAvi Kivity {
1494e1c57ab8SPaolo Bonzini     RAMBlock *block;
14950d53d9feSMike Day     RAMBlock *last_block = NULL;
14962152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
14972152f5caSJuan Quintela 
14982152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1499c5705a77SAvi Kivity 
1500b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
15019b8424d5SMichael S. Tsirkin     new_block->offset = find_ram_offset(new_block->max_length);
1502e1c57ab8SPaolo Bonzini 
15030628c182SMarkus Armbruster     if (!new_block->host) {
1504e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
15059b8424d5SMichael S. Tsirkin             xen_ram_alloc(new_block->offset, new_block->max_length,
15069b8424d5SMichael S. Tsirkin                           new_block->mr);
1507e1c57ab8SPaolo Bonzini         } else {
15089b8424d5SMichael S. Tsirkin             new_block->host = phys_mem_alloc(new_block->max_length,
1509a2b257d6SIgor Mammedov                                              &new_block->mr->align);
151039228250SMarkus Armbruster             if (!new_block->host) {
1511ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1512ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1513ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1514ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
1515ef701d7bSHu Tao                 return -1;
151639228250SMarkus Armbruster             }
15179b8424d5SMichael S. Tsirkin             memory_try_enable_merging(new_block->host, new_block->max_length);
1518c902760fSMarcelo Tosatti         }
15196977dfe6SYoshiaki Tamura     }
152094a6b54fSpbrook 
1521dd631697SLi Zhijian     new_ram_size = MAX(old_ram_size,
1522dd631697SLi Zhijian               (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1523dd631697SLi Zhijian     if (new_ram_size > old_ram_size) {
1524dd631697SLi Zhijian         migration_bitmap_extend(old_ram_size, new_ram_size);
1525dd631697SLi Zhijian     }
15260d53d9feSMike Day     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
15270d53d9feSMike Day      * QLIST (which has an RCU-friendly variant) does not have insertion at
15280d53d9feSMike Day      * tail, so save the last element in last_block.
15290d53d9feSMike Day      */
15300dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
15310d53d9feSMike Day         last_block = block;
15329b8424d5SMichael S. Tsirkin         if (block->max_length < new_block->max_length) {
1533abb26d63SPaolo Bonzini             break;
1534abb26d63SPaolo Bonzini         }
1535abb26d63SPaolo Bonzini     }
1536abb26d63SPaolo Bonzini     if (block) {
15370dc3f44aSMike Day         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
15380d53d9feSMike Day     } else if (last_block) {
15390dc3f44aSMike Day         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
15400d53d9feSMike Day     } else { /* list is empty */
15410dc3f44aSMike Day         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1542abb26d63SPaolo Bonzini     }
15430d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
154494a6b54fSpbrook 
15450dc3f44aSMike Day     /* Write list before version */
15460dc3f44aSMike Day     smp_wmb();
1547f798b07fSUmesh Deshpande     ram_list.version++;
1548b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1549f798b07fSUmesh Deshpande 
15502152f5caSJuan Quintela     new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
15512152f5caSJuan Quintela 
15522152f5caSJuan Quintela     if (new_ram_size > old_ram_size) {
15531ab4c8ceSJuan Quintela         int i;
1554ae3a7047SMike Day 
1555ae3a7047SMike Day         /* ram_list.dirty_memory[] is protected by the iothread lock.  */
15561ab4c8ceSJuan Quintela         for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
15571ab4c8ceSJuan Quintela             ram_list.dirty_memory[i] =
15581ab4c8ceSJuan Quintela                 bitmap_zero_extend(ram_list.dirty_memory[i],
15591ab4c8ceSJuan Quintela                                    old_ram_size, new_ram_size);
15601ab4c8ceSJuan Quintela        }
15612152f5caSJuan Quintela     }
15629b8424d5SMichael S. Tsirkin     cpu_physical_memory_set_dirty_range(new_block->offset,
156358d2707eSPaolo Bonzini                                         new_block->used_length,
156458d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
156594a6b54fSpbrook 
1566a904c911SPaolo Bonzini     if (new_block->host) {
15679b8424d5SMichael S. Tsirkin         qemu_ram_setup_dump(new_block->host, new_block->max_length);
15689b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
15699b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1570e1c57ab8SPaolo Bonzini         if (kvm_enabled()) {
15719b8424d5SMichael S. Tsirkin             kvm_setup_guest_memory(new_block->host, new_block->max_length);
1572e1c57ab8SPaolo Bonzini         }
1573a904c911SPaolo Bonzini     }
15746f0437e8SJan Kiszka 
157594a6b54fSpbrook     return new_block->offset;
157694a6b54fSpbrook }
1577e9a1ab19Sbellard 
15780b183fc8SPaolo Bonzini #ifdef __linux__
1579e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1580dbcb8981SPaolo Bonzini                                     bool share, const char *mem_path,
15817f56e740SPaolo Bonzini                                     Error **errp)
1582e1c57ab8SPaolo Bonzini {
1583e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1584ef701d7bSHu Tao     ram_addr_t addr;
1585ef701d7bSHu Tao     Error *local_err = NULL;
1586e1c57ab8SPaolo Bonzini 
1587e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
15887f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
15897f56e740SPaolo Bonzini         return -1;
1590e1c57ab8SPaolo Bonzini     }
1591e1c57ab8SPaolo Bonzini 
1592e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1593e1c57ab8SPaolo Bonzini         /*
1594e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1595e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1596e1c57ab8SPaolo Bonzini          * a hook there.
1597e1c57ab8SPaolo Bonzini          */
15987f56e740SPaolo Bonzini         error_setg(errp,
15997f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
16007f56e740SPaolo Bonzini         return -1;
1601e1c57ab8SPaolo Bonzini     }
1602e1c57ab8SPaolo Bonzini 
16034ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
1604e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1605e1c57ab8SPaolo Bonzini     new_block->mr = mr;
16069b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16079b8424d5SMichael S. Tsirkin     new_block->max_length = size;
1608dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
16097f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
16107f56e740SPaolo Bonzini                                      mem_path, errp);
16117f56e740SPaolo Bonzini     if (!new_block->host) {
16127f56e740SPaolo Bonzini         g_free(new_block);
16137f56e740SPaolo Bonzini         return -1;
16147f56e740SPaolo Bonzini     }
16157f56e740SPaolo Bonzini 
1616ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1617ef701d7bSHu Tao     if (local_err) {
1618ef701d7bSHu Tao         g_free(new_block);
1619ef701d7bSHu Tao         error_propagate(errp, local_err);
1620ef701d7bSHu Tao         return -1;
1621ef701d7bSHu Tao     }
1622ef701d7bSHu Tao     return addr;
1623e1c57ab8SPaolo Bonzini }
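/*
 * Usage sketch, hedged: memory_region_init_ram_from_file() reaches
 * this function for -object memory-backend-file; a direct call would
 * look like
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, true,
 *                                                "/dev/hugepages", &err);
 *
 * where share=true requests MAP_SHARED so other processes mapping the
 * same file observe guest writes; on failure -1 is returned and the
 * error is set.
 */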
16240b183fc8SPaolo Bonzini #endif
1625e1c57ab8SPaolo Bonzini 
162662be4e3aSMichael S. Tsirkin static
162762be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
162862be4e3aSMichael S. Tsirkin                                    void (*resized)(const char*,
162962be4e3aSMichael S. Tsirkin                                                    uint64_t length,
163062be4e3aSMichael S. Tsirkin                                                    void *host),
163162be4e3aSMichael S. Tsirkin                                    void *host, bool resizeable,
1632ef701d7bSHu Tao                                    MemoryRegion *mr, Error **errp)
1633e1c57ab8SPaolo Bonzini {
1634e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1635ef701d7bSHu Tao     ram_addr_t addr;
1636ef701d7bSHu Tao     Error *local_err = NULL;
1637e1c57ab8SPaolo Bonzini 
16384ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
16394ed023ceSDr. David Alan Gilbert     max_size = HOST_PAGE_ALIGN(max_size);
1640e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1641e1c57ab8SPaolo Bonzini     new_block->mr = mr;
164262be4e3aSMichael S. Tsirkin     new_block->resized = resized;
16439b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16449b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
164562be4e3aSMichael S. Tsirkin     assert(max_size >= size);
1646e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1647e1c57ab8SPaolo Bonzini     new_block->host = host;
1648e1c57ab8SPaolo Bonzini     if (host) {
16497bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1650e1c57ab8SPaolo Bonzini     }
165162be4e3aSMichael S. Tsirkin     if (resizeable) {
165262be4e3aSMichael S. Tsirkin         new_block->flags |= RAM_RESIZEABLE;
165362be4e3aSMichael S. Tsirkin     }
1654ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1655ef701d7bSHu Tao     if (local_err) {
1656ef701d7bSHu Tao         g_free(new_block);
1657ef701d7bSHu Tao         error_propagate(errp, local_err);
1658ef701d7bSHu Tao         return -1;
1659ef701d7bSHu Tao     }
1660ef701d7bSHu Tao     return addr;
1661e1c57ab8SPaolo Bonzini }
1662e1c57ab8SPaolo Bonzini 
166362be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
166462be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
166562be4e3aSMichael S. Tsirkin {
166662be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
166762be4e3aSMichael S. Tsirkin }
166862be4e3aSMichael S. Tsirkin 
1669ef701d7bSHu Tao ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
16706977dfe6SYoshiaki Tamura {
167162be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
167262be4e3aSMichael S. Tsirkin }
167362be4e3aSMichael S. Tsirkin 
167462be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
167562be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
167662be4e3aSMichael S. Tsirkin                                                      uint64_t length,
167762be4e3aSMichael S. Tsirkin                                                      void *host),
167862be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
167962be4e3aSMichael S. Tsirkin {
168062be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
16816977dfe6SYoshiaki Tamura }
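/*
 * Usage sketch with a hypothetical resized callback; resizeable blocks
 * back regions such as ACPI table blobs whose used_length may change:
 *
 *     static void my_resized(const char *id, uint64_t len, void *host)
 *     {
 *         ...refresh device state for the new length (hypothetical)...
 *     }
 *
 *     qemu_ram_alloc_resizeable(size, max_size, my_resized, mr, &errp);
 *
 * max_size worth of host mapping is reserved up front, so only
 * used_length changes on resize and the host pointer stays stable.
 */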
16826977dfe6SYoshiaki Tamura 
168343771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block)
1684e9a1ab19Sbellard {
16857bd4f430SPaolo Bonzini     if (block->flags & RAM_PREALLOC) {
1686cd19cfa2SHuang Ying         ;
1687dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
1688dfeaf2abSMarkus Armbruster         xen_invalidate_map_cache_entry(block->host);
1689089f3f76SStefan Weil #ifndef _WIN32
16903435f395SMarkus Armbruster     } else if (block->fd >= 0) {
1691794e8f30SMichael S. Tsirkin         qemu_ram_munmap(block->host, block->max_length);
169204b16653SAlex Williamson         close(block->fd);
1693089f3f76SStefan Weil #endif
169404b16653SAlex Williamson     } else {
16959b8424d5SMichael S. Tsirkin         qemu_anon_ram_free(block->host, block->max_length);
169604b16653SAlex Williamson     }
16977267c094SAnthony Liguori     g_free(block);
169843771539SPaolo Bonzini }
169943771539SPaolo Bonzini 
170043771539SPaolo Bonzini void qemu_ram_free(ram_addr_t addr)
170143771539SPaolo Bonzini {
170243771539SPaolo Bonzini     RAMBlock *block;
170343771539SPaolo Bonzini 
170443771539SPaolo Bonzini     qemu_mutex_lock_ramlist();
17050dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
170643771539SPaolo Bonzini         if (addr == block->offset) {
17070dc3f44aSMike Day             QLIST_REMOVE_RCU(block, next);
170843771539SPaolo Bonzini             ram_list.mru_block = NULL;
17090dc3f44aSMike Day             /* Write list before version */
17100dc3f44aSMike Day             smp_wmb();
171143771539SPaolo Bonzini             ram_list.version++;
171243771539SPaolo Bonzini             call_rcu(block, reclaim_ramblock, rcu);
1713b2a8658eSUmesh Deshpande             break;
171404b16653SAlex Williamson         }
171504b16653SAlex Williamson     }
1716b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1717e9a1ab19Sbellard }
1718e9a1ab19Sbellard 
1719cd19cfa2SHuang Ying #ifndef _WIN32
1720cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1721cd19cfa2SHuang Ying {
1722cd19cfa2SHuang Ying     RAMBlock *block;
1723cd19cfa2SHuang Ying     ram_addr_t offset;
1724cd19cfa2SHuang Ying     int flags;
1725cd19cfa2SHuang Ying     void *area, *vaddr;
1726cd19cfa2SHuang Ying 
17270dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1728cd19cfa2SHuang Ying         offset = addr - block->offset;
17299b8424d5SMichael S. Tsirkin         if (offset < block->max_length) {
17301240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
17317bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1732cd19cfa2SHuang Ying                 ;
1733dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1734dfeaf2abSMarkus Armbruster                 abort();
1735cd19cfa2SHuang Ying             } else {
1736cd19cfa2SHuang Ying                 flags = MAP_FIXED;
17373435f395SMarkus Armbruster                 if (block->fd >= 0) {
1738dbcb8981SPaolo Bonzini                     flags |= (block->flags & RAM_SHARED ?
1739dbcb8981SPaolo Bonzini                               MAP_SHARED : MAP_PRIVATE);
1740cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1741cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1742cd19cfa2SHuang Ying                 } else {
17432eb9fbaaSMarkus Armbruster                     /*
17442eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
17452eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
17462eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
17472eb9fbaaSMarkus Armbruster                      */
17482eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
17492eb9fbaaSMarkus Armbruster 
1750cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1751cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1752cd19cfa2SHuang Ying                                 flags, -1, 0);
1753cd19cfa2SHuang Ying                 }
1754cd19cfa2SHuang Ying                 if (area != vaddr) {
1755f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1756f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1757cd19cfa2SHuang Ying                             length, addr);
1758cd19cfa2SHuang Ying                     exit(1);
1759cd19cfa2SHuang Ying                 }
17608490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1761ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1762cd19cfa2SHuang Ying             }
1763cd19cfa2SHuang Ying         }
1764cd19cfa2SHuang Ying     }
1765cd19cfa2SHuang Ying }
1766cd19cfa2SHuang Ying #endif /* !_WIN32 */
1767cd19cfa2SHuang Ying 
1768a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1769a35ba7beSPaolo Bonzini {
1770ae3a7047SMike Day     RAMBlock *block;
1771ae3a7047SMike Day     int fd;
1772a35ba7beSPaolo Bonzini 
17730dc3f44aSMike Day     rcu_read_lock();
1774ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1775ae3a7047SMike Day     fd = block->fd;
17760dc3f44aSMike Day     rcu_read_unlock();
1777ae3a7047SMike Day     return fd;
1778a35ba7beSPaolo Bonzini }
1779a35ba7beSPaolo Bonzini 
178056a571d9STetsuya Mukawa void qemu_set_ram_fd(ram_addr_t addr, int fd)
178156a571d9STetsuya Mukawa {
178256a571d9STetsuya Mukawa     RAMBlock *block;
178356a571d9STetsuya Mukawa 
178456a571d9STetsuya Mukawa     rcu_read_lock();
178556a571d9STetsuya Mukawa     block = qemu_get_ram_block(addr);
178656a571d9STetsuya Mukawa     block->fd = fd;
178756a571d9STetsuya Mukawa     rcu_read_unlock();
178856a571d9STetsuya Mukawa }
178956a571d9STetsuya Mukawa 
17903fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
17913fd74b84SDamjan Marion {
1792ae3a7047SMike Day     RAMBlock *block;
1793ae3a7047SMike Day     void *ptr;
17943fd74b84SDamjan Marion 
17950dc3f44aSMike Day     rcu_read_lock();
1796ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1797ae3a7047SMike Day     ptr = ramblock_ptr(block, 0);
17980dc3f44aSMike Day     rcu_read_unlock();
1799ae3a7047SMike Day     return ptr;
18003fd74b84SDamjan Marion }
18013fd74b84SDamjan Marion 
18021b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
1803ae3a7047SMike Day  * This should not be used for general purpose DMA.  Use address_space_map
1804ae3a7047SMike Day  * or address_space_rw instead. For local memory (e.g. video ram) that the
1805ae3a7047SMike Day  * device owns, use memory_region_get_ram_ptr.
18060dc3f44aSMike Day  *
180749b24afcSPaolo Bonzini  * Called within RCU critical section.
18081b5ec234SPaolo Bonzini  */
18091b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
18101b5ec234SPaolo Bonzini {
181149b24afcSPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
1812ae3a7047SMike Day 
1813ae3a7047SMike Day     if (xen_enabled() && block->host == NULL) {
1814432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1815432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1816712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1817432d268cSJun Nakajima          */
1818432d268cSJun Nakajima         if (block->offset == 0) {
181949b24afcSPaolo Bonzini             return xen_map_cache(addr, 0, 0);
1820432d268cSJun Nakajima         }
1821ae3a7047SMike Day 
1822ae3a7047SMike Day         block->host = xen_map_cache(block->offset, block->max_length, 1);
1823432d268cSJun Nakajima     }
182449b24afcSPaolo Bonzini     return ramblock_ptr(block, addr - block->offset);
182594a6b54fSpbrook }
1826f471a17eSAlex Williamson 
182738bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1828ae3a7047SMike Day  * but takes a size argument.
18290dc3f44aSMike Day  *
1830e81bcda5SPaolo Bonzini  * Called within RCU critical section.
1831ae3a7047SMike Day  */
1832cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
183338bee5dcSStefano Stabellini {
1834e81bcda5SPaolo Bonzini     RAMBlock *block;
1835e81bcda5SPaolo Bonzini     ram_addr_t offset_inside_block;
18368ab934f9SStefano Stabellini     if (*size == 0) {
18378ab934f9SStefano Stabellini         return NULL;
18388ab934f9SStefano Stabellini     }
1839e81bcda5SPaolo Bonzini 
1840e81bcda5SPaolo Bonzini     block = qemu_get_ram_block(addr);
1841e81bcda5SPaolo Bonzini     offset_inside_block = addr - block->offset;
1842e81bcda5SPaolo Bonzini     *size = MIN(*size, block->max_length - offset_inside_block);
1843e81bcda5SPaolo Bonzini 
1844e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
1845e81bcda5SPaolo Bonzini         /* We need to check if the requested address is in the RAM
1846e81bcda5SPaolo Bonzini          * because we don't want to map the entire memory in QEMU.
1847e81bcda5SPaolo Bonzini          * In that case just map the requested area.
1848e81bcda5SPaolo Bonzini          */
1849e81bcda5SPaolo Bonzini         if (block->offset == 0) {
1850e41d7c69SJan Kiszka             return xen_map_cache(addr, *size, 1);
185138bee5dcSStefano Stabellini         }
185238bee5dcSStefano Stabellini 
1853e81bcda5SPaolo Bonzini         block->host = xen_map_cache(block->offset, block->max_length, 1);
185438bee5dcSStefano Stabellini     }
1855e81bcda5SPaolo Bonzini 
1856e81bcda5SPaolo Bonzini     return ramblock_ptr(block, offset_inside_block);
185738bee5dcSStefano Stabellini }
185838bee5dcSStefano Stabellini 
1859422148d3SDr. David Alan Gilbert /*
1860422148d3SDr. David Alan Gilbert  * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1861422148d3SDr. David Alan Gilbert  * in that RAMBlock.
1862422148d3SDr. David Alan Gilbert  *
1863422148d3SDr. David Alan Gilbert  * ptr: Host pointer to look up
1864422148d3SDr. David Alan Gilbert  * round_offset: If true round the result offset down to a page boundary
1865422148d3SDr. David Alan Gilbert  * *ram_addr: set to result ram_addr
1866422148d3SDr. David Alan Gilbert  * *offset: set to result offset within the RAMBlock
1867422148d3SDr. David Alan Gilbert  *
1868422148d3SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1869ae3a7047SMike Day  *
1870ae3a7047SMike Day  * By the time this function returns, the returned pointer is not protected
1871ae3a7047SMike Day  * by RCU anymore.  If the caller is not within an RCU critical section and
1872ae3a7047SMike Day  * does not hold the iothread lock, it must have other means of protecting the
1873ae3a7047SMike Day  * pointer, such as a reference to the region that includes the incoming
1874ae3a7047SMike Day  * ram_addr_t.
1875ae3a7047SMike Day  */
1876422148d3SDr. David Alan Gilbert RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1877422148d3SDr. David Alan Gilbert                                    ram_addr_t *ram_addr,
1878422148d3SDr. David Alan Gilbert                                    ram_addr_t *offset)
18795579c7f3Spbrook {
188094a6b54fSpbrook     RAMBlock *block;
188194a6b54fSpbrook     uint8_t *host = ptr;
188294a6b54fSpbrook 
1883868bb33fSJan Kiszka     if (xen_enabled()) {
18840dc3f44aSMike Day         rcu_read_lock();
1885e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
1886422148d3SDr. David Alan Gilbert         block = qemu_get_ram_block(*ram_addr);
1887422148d3SDr. David Alan Gilbert         if (block) {
1888422148d3SDr. David Alan Gilbert             *offset = (host - block->host);
1889422148d3SDr. David Alan Gilbert         }
18900dc3f44aSMike Day         rcu_read_unlock();
1891422148d3SDr. David Alan Gilbert         return block;
1892712c2b41SStefano Stabellini     }
1893712c2b41SStefano Stabellini 
18940dc3f44aSMike Day     rcu_read_lock();
18950dc3f44aSMike Day     block = atomic_rcu_read(&ram_list.mru_block);
18969b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
189723887b79SPaolo Bonzini         goto found;
189823887b79SPaolo Bonzini     }
189923887b79SPaolo Bonzini 
19000dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1901432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
1902432d268cSJun Nakajima         if (block->host == NULL) {
1903432d268cSJun Nakajima             continue;
1904432d268cSJun Nakajima         }
19059b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
190623887b79SPaolo Bonzini             goto found;
190794a6b54fSpbrook         }
1908f471a17eSAlex Williamson     }
1909432d268cSJun Nakajima 
19100dc3f44aSMike Day     rcu_read_unlock();
19111b5ec234SPaolo Bonzini     return NULL;
191223887b79SPaolo Bonzini 
191323887b79SPaolo Bonzini found:
1914422148d3SDr. David Alan Gilbert     *offset = (host - block->host);
1915422148d3SDr. David Alan Gilbert     if (round_offset) {
1916422148d3SDr. David Alan Gilbert         *offset &= TARGET_PAGE_MASK;
1917422148d3SDr. David Alan Gilbert     }
1918422148d3SDr. David Alan Gilbert     *ram_addr = block->offset + *offset;
19190dc3f44aSMike Day     rcu_read_unlock();
1920422148d3SDr. David Alan Gilbert     return block;
1921422148d3SDr. David Alan Gilbert }
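/*
 * Usage sketch: qemu_ram_addr_from_host() below is the common wrapper;
 * a caller that also wants the block can do (host_ptr being a pointer
 * previously obtained from guest RAM, e.g. via qemu_get_ram_ptr()):
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *
 * With round_offset true, offset is rounded down to a target page
 * boundary.  Note the RCU caveat in the comment above: the result is
 * not protected once the function returns.
 */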
1922422148d3SDr. David Alan Gilbert 
1923e3dd7493SDr. David Alan Gilbert /*
1924e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
1925e3dd7493SDr. David Alan Gilbert  *
1926e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
1927e3dd7493SDr. David Alan Gilbert  *
1928e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1929e3dd7493SDr. David Alan Gilbert  */
1930e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
1931e3dd7493SDr. David Alan Gilbert {
1932e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
1933e3dd7493SDr. David Alan Gilbert 
1934e3dd7493SDr. David Alan Gilbert     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1935e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
1936e3dd7493SDr. David Alan Gilbert             return block;
1937e3dd7493SDr. David Alan Gilbert         }
1938e3dd7493SDr. David Alan Gilbert     }
1939e3dd7493SDr. David Alan Gilbert 
1940e3dd7493SDr. David Alan Gilbert     return NULL;
1941e3dd7493SDr. David Alan Gilbert }
1942e3dd7493SDr. David Alan Gilbert 
1943422148d3SDr. David Alan Gilbert /* Some of the softmmu routines need to translate from a host pointer
1944422148d3SDr. David Alan Gilbert    (typically a TLB entry) back to a ram offset.  */
1945422148d3SDr. David Alan Gilbert MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1946422148d3SDr. David Alan Gilbert {
1947422148d3SDr. David Alan Gilbert     RAMBlock *block;
1948422148d3SDr. David Alan Gilbert     ram_addr_t offset; /* Not used */
1949422148d3SDr. David Alan Gilbert 
1950422148d3SDr. David Alan Gilbert     block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1951422148d3SDr. David Alan Gilbert 
1952422148d3SDr. David Alan Gilbert     if (!block) {
1953422148d3SDr. David Alan Gilbert         return NULL;
1954422148d3SDr. David Alan Gilbert     }
1955422148d3SDr. David Alan Gilbert 
1956422148d3SDr. David Alan Gilbert     return block->mr;
1957e890261fSMarcelo Tosatti }
1958f471a17eSAlex Williamson 
195949b24afcSPaolo Bonzini /* Called within RCU critical section.  */
1960a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
19610e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
19621ccde1cbSbellard {
196352159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
19640e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
19653a7d929eSbellard     }
19660e0df1e2SAvi Kivity     switch (size) {
19670e0df1e2SAvi Kivity     case 1:
19685579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
19690e0df1e2SAvi Kivity         break;
19700e0df1e2SAvi Kivity     case 2:
19715579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
19720e0df1e2SAvi Kivity         break;
19730e0df1e2SAvi Kivity     case 4:
19745579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
19750e0df1e2SAvi Kivity         break;
19760e0df1e2SAvi Kivity     default:
19770e0df1e2SAvi Kivity         abort();
19780e0df1e2SAvi Kivity     }
197958d2707eSPaolo Bonzini     /* Set both VGA and migration bits for simplicity and to remove
198058d2707eSPaolo Bonzini      * the notdirty callback faster.
198158d2707eSPaolo Bonzini      */
198258d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(ram_addr, size,
198358d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_NOCODE);
1984f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1985f23db169Sbellard        flushed */
1986a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
1987bcae01e4SPeter Crosthwaite         tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
19884917cf44SAndreas Färber     }
19891ccde1cbSbellard }
19901ccde1cbSbellard 
1991b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1992b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1993b018ddf6SPaolo Bonzini {
1994b018ddf6SPaolo Bonzini     return is_write;
1995b018ddf6SPaolo Bonzini }
1996b018ddf6SPaolo Bonzini 
19970e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
19980e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1999b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
20000e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
20011ccde1cbSbellard };
20021ccde1cbSbellard 
20030f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
200466b9b43cSPeter Maydell static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
20050f459d16Spbrook {
200693afeadeSAndreas Färber     CPUState *cpu = current_cpu;
200793afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
200806d55cc1Saliguori     target_ulong pc, cs_base;
20090f459d16Spbrook     target_ulong vaddr;
2010a1d1bb31Saliguori     CPUWatchpoint *wp;
201106d55cc1Saliguori     int cpu_flags;
20120f459d16Spbrook 
2013ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
201406d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
201506d55cc1Saliguori          * the debug interrupt so that it will trigger after the
201606d55cc1Saliguori          * current instruction. */
201793afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
201806d55cc1Saliguori         return;
201906d55cc1Saliguori     }
202093afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2021ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
202205068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
202305068c0dSPeter Maydell             && (wp->flags & flags)) {
202408225676SPeter Maydell             if (flags == BP_MEM_READ) {
202508225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
202608225676SPeter Maydell             } else {
202708225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
202808225676SPeter Maydell             }
202908225676SPeter Maydell             wp->hitaddr = vaddr;
203066b9b43cSPeter Maydell             wp->hitattrs = attrs;
2031ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
2032ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
2033239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
203406d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
203527103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
20365638d180SAndreas Färber                     cpu_loop_exit(cpu);
203706d55cc1Saliguori                 } else {
203806d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2039648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
20400ea8cb88SAndreas Färber                     cpu_resume_from_signal(cpu, NULL);
20410f459d16Spbrook                 }
2042488d6577SMax Filippov             }
20436e140f28Saliguori         } else {
20446e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
20456e140f28Saliguori         }
20460f459d16Spbrook     }
20470f459d16Spbrook }
20480f459d16Spbrook 
20496658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
20506658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
20516658ffb8Spbrook    phys routines.  */
205266b9b43cSPeter Maydell static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
205366b9b43cSPeter Maydell                                   unsigned size, MemTxAttrs attrs)
20546658ffb8Spbrook {
205566b9b43cSPeter Maydell     MemTxResult res;
205666b9b43cSPeter Maydell     uint64_t data;
20576658ffb8Spbrook 
205866b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
20591ec9b909SAvi Kivity     switch (size) {
206067364150SMax Filippov     case 1:
206166b9b43cSPeter Maydell         data = address_space_ldub(&address_space_memory, addr, attrs, &res);
206267364150SMax Filippov         break;
206367364150SMax Filippov     case 2:
206466b9b43cSPeter Maydell         data = address_space_lduw(&address_space_memory, addr, attrs, &res);
206567364150SMax Filippov         break;
206667364150SMax Filippov     case 4:
206766b9b43cSPeter Maydell         data = address_space_ldl(&address_space_memory, addr, attrs, &res);
206867364150SMax Filippov         break;
20691ec9b909SAvi Kivity     default: abort();
20701ec9b909SAvi Kivity     }
207166b9b43cSPeter Maydell     *pdata = data;
207266b9b43cSPeter Maydell     return res;
207366b9b43cSPeter Maydell }
207466b9b43cSPeter Maydell 
207566b9b43cSPeter Maydell static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
207666b9b43cSPeter Maydell                                    uint64_t val, unsigned size,
207766b9b43cSPeter Maydell                                    MemTxAttrs attrs)
207866b9b43cSPeter Maydell {
207966b9b43cSPeter Maydell     MemTxResult res;
208066b9b43cSPeter Maydell 
208166b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
208266b9b43cSPeter Maydell     switch (size) {
208366b9b43cSPeter Maydell     case 1:
208466b9b43cSPeter Maydell         address_space_stb(&address_space_memory, addr, val, attrs, &res);
208566b9b43cSPeter Maydell         break;
208666b9b43cSPeter Maydell     case 2:
208766b9b43cSPeter Maydell         address_space_stw(&address_space_memory, addr, val, attrs, &res);
208866b9b43cSPeter Maydell         break;
208966b9b43cSPeter Maydell     case 4:
209066b9b43cSPeter Maydell         address_space_stl(&address_space_memory, addr, val, attrs, &res);
209166b9b43cSPeter Maydell         break;
209266b9b43cSPeter Maydell     default: abort();
209366b9b43cSPeter Maydell     }
209466b9b43cSPeter Maydell     return res;
20956658ffb8Spbrook }
20966658ffb8Spbrook 
20971ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
209866b9b43cSPeter Maydell     .read_with_attrs = watch_mem_read,
209966b9b43cSPeter Maydell     .write_with_attrs = watch_mem_write,
21001ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
21016658ffb8Spbrook };
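/* Editor's sketch (illustrative, not part of physmem.c): a minimal
 * MemoryRegionOps built on the same read_with_attrs/write_with_attrs hooks
 * as watch_mem_ops above, showing how MemTxAttrs and MemTxResult flow
 * through a dispatch callback.  All demo_* names are hypothetical.
 */
static uint64_t demo_reg;

static MemTxResult demo_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                 unsigned size, MemTxAttrs attrs)
{
    *pdata = demo_reg;   /* hand the value back to the dispatcher */
    return MEMTX_OK;     /* a failing device would return MEMTX_DECODE_ERROR */
}

static MemTxResult demo_mem_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size, MemTxAttrs attrs)
{
    demo_reg = val;
    return MEMTX_OK;
}

static const MemoryRegionOps demo_mem_ops = {
    .read_with_attrs = demo_mem_read,
    .write_with_attrs = demo_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};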
21026658ffb8Spbrook 
2103f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2104f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2105db7b5426Sblueswir1 {
2106acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2107ff6cff75SPaolo Bonzini     uint8_t buf[8];
21085c9eb028SPeter Maydell     MemTxResult res;
2109791af8c8SPaolo Bonzini 
2110db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2111016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2112acc9d80bSJan Kiszka            subpage, len, addr);
2113db7b5426Sblueswir1 #endif
21145c9eb028SPeter Maydell     res = address_space_read(subpage->as, addr + subpage->base,
21155c9eb028SPeter Maydell                              attrs, buf, len);
21165c9eb028SPeter Maydell     if (res) {
21175c9eb028SPeter Maydell         return res;
2118f25a49e0SPeter Maydell     }
2119acc9d80bSJan Kiszka     switch (len) {
2120acc9d80bSJan Kiszka     case 1:
2121f25a49e0SPeter Maydell         *data = ldub_p(buf);
2122f25a49e0SPeter Maydell         return MEMTX_OK;
2123acc9d80bSJan Kiszka     case 2:
2124f25a49e0SPeter Maydell         *data = lduw_p(buf);
2125f25a49e0SPeter Maydell         return MEMTX_OK;
2126acc9d80bSJan Kiszka     case 4:
2127f25a49e0SPeter Maydell         *data = ldl_p(buf);
2128f25a49e0SPeter Maydell         return MEMTX_OK;
2129ff6cff75SPaolo Bonzini     case 8:
2130f25a49e0SPeter Maydell         *data = ldq_p(buf);
2131f25a49e0SPeter Maydell         return MEMTX_OK;
2132acc9d80bSJan Kiszka     default:
2133acc9d80bSJan Kiszka         abort();
2134acc9d80bSJan Kiszka     }
2135db7b5426Sblueswir1 }
2136db7b5426Sblueswir1 
2137f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2138f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2139db7b5426Sblueswir1 {
2140acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2141ff6cff75SPaolo Bonzini     uint8_t buf[8];
2142acc9d80bSJan Kiszka 
2143db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2144016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2145acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2146acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2147db7b5426Sblueswir1 #endif
2148acc9d80bSJan Kiszka     switch (len) {
2149acc9d80bSJan Kiszka     case 1:
2150acc9d80bSJan Kiszka         stb_p(buf, value);
2151acc9d80bSJan Kiszka         break;
2152acc9d80bSJan Kiszka     case 2:
2153acc9d80bSJan Kiszka         stw_p(buf, value);
2154acc9d80bSJan Kiszka         break;
2155acc9d80bSJan Kiszka     case 4:
2156acc9d80bSJan Kiszka         stl_p(buf, value);
2157acc9d80bSJan Kiszka         break;
2158ff6cff75SPaolo Bonzini     case 8:
2159ff6cff75SPaolo Bonzini         stq_p(buf, value);
2160ff6cff75SPaolo Bonzini         break;
2161acc9d80bSJan Kiszka     default:
2162acc9d80bSJan Kiszka         abort();
2163acc9d80bSJan Kiszka     }
21645c9eb028SPeter Maydell     return address_space_write(subpage->as, addr + subpage->base,
21655c9eb028SPeter Maydell                                attrs, buf, len);
2166db7b5426Sblueswir1 }
2167db7b5426Sblueswir1 
2168c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
2169016e9d62SAmos Kong                             unsigned len, bool is_write)
2170c353e4ccSPaolo Bonzini {
2171acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2172c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2173016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2174acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2175c353e4ccSPaolo Bonzini #endif
2176c353e4ccSPaolo Bonzini 
2177acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
2178016e9d62SAmos Kong                                       len, is_write);
2179c353e4ccSPaolo Bonzini }
2180c353e4ccSPaolo Bonzini 
218170c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2182f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2183f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2184ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2185ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2186ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2187ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2188c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
218970c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2190db7b5426Sblueswir1 };
2191db7b5426Sblueswir1 
2192c227f099SAnthony Liguori static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
21935312bd8bSAvi Kivity                              uint16_t section)
2194db7b5426Sblueswir1 {
2195db7b5426Sblueswir1     int idx, eidx;
2196db7b5426Sblueswir1 
2197db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
2198db7b5426Sblueswir1         return -1;
2198db7b5426Sblueswir1     }
2199db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2200db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2201db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2202016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2203016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2204db7b5426Sblueswir1 #endif
2205db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
22065312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2207db7b5426Sblueswir1     }
2208db7b5426Sblueswir1 
2209db7b5426Sblueswir1     return 0;
2210db7b5426Sblueswir1 }
2211db7b5426Sblueswir1 
2212acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2213db7b5426Sblueswir1 {
2214c227f099SAnthony Liguori     subpage_t *mmio;
2215db7b5426Sblueswir1 
22167267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
22171eec614bSaliguori 
2218acc9d80bSJan Kiszka     mmio->as = as;
2219db7b5426Sblueswir1     mmio->base = base;
22202c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2221b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2222b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2223db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2224016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2225016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2226db7b5426Sblueswir1 #endif
2227b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2228db7b5426Sblueswir1 
2229db7b5426Sblueswir1     return mmio;
2230db7b5426Sblueswir1 }
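/* Editor's sketch: how a subpage is populated.  Assuming a 4 KiB
 * TARGET_PAGE_SIZE and two section indices s1/s2 obtained from
 * phys_section_add(), the two halves of one page can be routed to
 * different sections:
 *
 *     subpage_t *sp = subpage_init(as, base);
 *     subpage_register(sp, 0x000, 0x7ff, s1);
 *     subpage_register(sp, 0x800, 0xfff, s2);
 *
 * Accesses then land in subpage_read()/subpage_write() above, which forward
 * to address_space_read()/address_space_write() at sp->base plus the offset
 * within the page.
 */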
2231db7b5426Sblueswir1 
2232a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2233a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
22345312bd8bSAvi Kivity {
2235a656e22fSPeter Crosthwaite     assert(as);
22365312bd8bSAvi Kivity     MemoryRegionSection section = {
2237a656e22fSPeter Crosthwaite         .address_space = as,
22385312bd8bSAvi Kivity         .mr = mr,
22395312bd8bSAvi Kivity         .offset_within_address_space = 0,
22405312bd8bSAvi Kivity         .offset_within_region = 0,
2241052e87b0SPaolo Bonzini         .size = int128_2_64(),
22425312bd8bSAvi Kivity     };
22435312bd8bSAvi Kivity 
224453cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
22455312bd8bSAvi Kivity }
22465312bd8bSAvi Kivity 
2247a54c87b6SPeter Maydell MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2248aa102231SAvi Kivity {
2249a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2250a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
225132857f4dSPeter Maydell     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
225279e2b9aeSPaolo Bonzini     MemoryRegionSection *sections = d->map.sections;
22539d82b5a7SPaolo Bonzini 
22549d82b5a7SPaolo Bonzini     return sections[index & ~TARGET_PAGE_MASK].mr;
2255aa102231SAvi Kivity }
2256aa102231SAvi Kivity 
2257e9179ce1SAvi Kivity static void io_mem_init(void)
2258e9179ce1SAvi Kivity {
22591f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
22591f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22602c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
22611f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22622c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
22631f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22642c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
22651f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2266e9179ce1SAvi Kivity }
2267e9179ce1SAvi Kivity 
2268ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
2269ac1970fbSAvi Kivity {
227089ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
227153cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
227253cb28cbSMarcel Apfelbaum     uint16_t n;
227353cb28cbSMarcel Apfelbaum 
2274a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
227553cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
2276a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
227753cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
2278a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
227953cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
2280a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
228153cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
228200752703SPaolo Bonzini 
22839736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
228400752703SPaolo Bonzini     d->as = as;
228500752703SPaolo Bonzini     as->next_dispatch = d;
228600752703SPaolo Bonzini }
228700752703SPaolo Bonzini 
228879e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d)
228979e2b9aeSPaolo Bonzini {
229079e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
229179e2b9aeSPaolo Bonzini     g_free(d);
229279e2b9aeSPaolo Bonzini }
229379e2b9aeSPaolo Bonzini 
229400752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
229500752703SPaolo Bonzini {
229600752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
22970475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
22980475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
2299ac1970fbSAvi Kivity 
230053cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
2301b35ba30fSMichael S. Tsirkin 
230279e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, next);
230353cb28cbSMarcel Apfelbaum     if (cur) {
230479e2b9aeSPaolo Bonzini         call_rcu(cur, address_space_dispatch_free, rcu);
2305ac1970fbSAvi Kivity     }
23069affd6fcSPaolo Bonzini }
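/* Editor's sketch (illustrative, not part of physmem.c): the RCU
 * publication pattern used by mem_commit() above, reduced to a
 * hypothetical struct.  Readers dereference the pointer with
 * atomic_rcu_read() inside an RCU critical section; the writer swaps it
 * in with atomic_rcu_set() and reclaims the old copy only after a grace
 * period, via call_rcu().
 */
typedef struct DemoState {
    struct rcu_head rcu;     /* required by call_rcu() */
    int value;
} DemoState;

static DemoState *demo_state;

static void demo_state_free(DemoState *old)
{
    g_free(old);
}

static void demo_state_update(int value)
{
    DemoState *next = g_new0(DemoState, 1);
    DemoState *prev = demo_state;

    next->value = value;
    atomic_rcu_set(&demo_state, next);          /* publish the new copy */
    if (prev) {
        call_rcu(prev, demo_state_free, rcu);   /* free after readers drain */
    }
}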
23079affd6fcSPaolo Bonzini 
23081d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
230950c1e149SAvi Kivity {
231032857f4dSPeter Maydell     CPUAddressSpace *cpuas;
231132857f4dSPeter Maydell     AddressSpaceDispatch *d;
2312117712c3SAvi Kivity 
2313117712c3SAvi Kivity     /* Since each CPU stores ram addresses in its TLB cache, we must
2314117712c3SAvi Kivity        reset the modified entries.  */
231532857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
231632857f4dSPeter Maydell     cpu_reloading_memory_map();
231732857f4dSPeter Maydell     /* The CPU and TLB are protected by the iothread lock.
231832857f4dSPeter Maydell      * We reload the dispatch pointer now because cpu_reloading_memory_map()
231932857f4dSPeter Maydell      * may have split the RCU critical section.
232032857f4dSPeter Maydell      */
232132857f4dSPeter Maydell     d = atomic_rcu_read(&cpuas->as->dispatch);
232232857f4dSPeter Maydell     cpuas->memory_dispatch = d;
232332857f4dSPeter Maydell     tlb_flush(cpuas->cpu, 1);
232450c1e149SAvi Kivity }
232550c1e149SAvi Kivity 
2326ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
2327ac1970fbSAvi Kivity {
232800752703SPaolo Bonzini     as->dispatch = NULL;
232989ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
2330ac1970fbSAvi Kivity         .begin = mem_begin,
233100752703SPaolo Bonzini         .commit = mem_commit,
2332ac1970fbSAvi Kivity         .region_add = mem_add,
2333ac1970fbSAvi Kivity         .region_nop = mem_add,
2334ac1970fbSAvi Kivity         .priority = 0,
2335ac1970fbSAvi Kivity     };
233689ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
2337ac1970fbSAvi Kivity }
2338ac1970fbSAvi Kivity 
23396e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
23406e48e8f9SPaolo Bonzini {
23416e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
23426e48e8f9SPaolo Bonzini }
23436e48e8f9SPaolo Bonzini 
234483f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
234583f3c251SAvi Kivity {
234683f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
234783f3c251SAvi Kivity 
234879e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
234979e2b9aeSPaolo Bonzini     if (d) {
235079e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
235179e2b9aeSPaolo Bonzini     }
235283f3c251SAvi Kivity }
235383f3c251SAvi Kivity 
235462152b8aSAvi Kivity static void memory_map_init(void)
235562152b8aSAvi Kivity {
23567267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
235703f49957SPaolo Bonzini 
235857271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
23597dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2360309cb471SAvi Kivity 
23617267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
23623bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
23633bb28b72SJan Kiszka                           65536);
23647dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
23652641689aSliguang }
236662152b8aSAvi Kivity 
236762152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
236862152b8aSAvi Kivity {
236962152b8aSAvi Kivity     return system_memory;
237062152b8aSAvi Kivity }
237162152b8aSAvi Kivity 
2372309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2373309cb471SAvi Kivity {
2374309cb471SAvi Kivity     return system_io;
2375309cb471SAvi Kivity }
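/* Editor's sketch (illustrative, not part of physmem.c): how board code
 * typically attaches RAM to the system memory tree created by
 * memory_map_init() above.  The region name, base address 0 and 128 MiB
 * size are hypothetical; error_fatal comes from qapi/error.h.
 */
static void demo_board_init_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "demo.ram", 128 * 1024 * 1024,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}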
2376309cb471SAvi Kivity 
2377e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2378e2eef170Spbrook 
237913eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
238013eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2381f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2382a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
238313eb76e0Sbellard {
238413eb76e0Sbellard     int l, flags;
238513eb76e0Sbellard     target_ulong page;
238653a5960aSpbrook     void * p;
238713eb76e0Sbellard 
238813eb76e0Sbellard     while (len > 0) {
238913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
239013eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
239113eb76e0Sbellard         if (l > len)
239213eb76e0Sbellard             l = len;
239313eb76e0Sbellard         flags = page_get_flags(page);
239413eb76e0Sbellard         if (!(flags & PAGE_VALID))
2395a68fe89cSPaul Brook             return -1;
239613eb76e0Sbellard         if (is_write) {
239713eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2398a68fe89cSPaul Brook                 return -1;
2399579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
240072fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2401a68fe89cSPaul Brook                 return -1;
240272fb7daaSaurel32             memcpy(p, buf, l);
240372fb7daaSaurel32             unlock_user(p, addr, l);
240413eb76e0Sbellard         } else {
240513eb76e0Sbellard             if (!(flags & PAGE_READ))
2406a68fe89cSPaul Brook                 return -1;
2407579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
240872fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2409a68fe89cSPaul Brook                 return -1;
241072fb7daaSaurel32             memcpy(buf, p, l);
24115b257578Saurel32             unlock_user(p, addr, 0);
241213eb76e0Sbellard         }
241313eb76e0Sbellard         len -= l;
241413eb76e0Sbellard         buf += l;
241513eb76e0Sbellard         addr += l;
241613eb76e0Sbellard     }
2417a68fe89cSPaul Brook     return 0;
241813eb76e0Sbellard }
24198df1cd07Sbellard 
242013eb76e0Sbellard #else
242151d7a9ebSAnthony PERARD 
2422845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2423a8170e5eSAvi Kivity                                      hwaddr length)
242451d7a9ebSAnthony PERARD {
2425845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2426e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2427e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2428e87f7778SPaolo Bonzini      * xen_modified_memory.
2429e87f7778SPaolo Bonzini      */
2430e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2431e87f7778SPaolo Bonzini         dirty_log_mask =
2432e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2433e87f7778SPaolo Bonzini     }
2434845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
243535865339SPaolo Bonzini         tb_invalidate_phys_range(addr, addr + length);
2436845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2437845b6214SPaolo Bonzini     }
243858d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
243949dfcec4SPaolo Bonzini }
244051d7a9ebSAnthony PERARD 
244123326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
244282f2563fSPaolo Bonzini {
2443e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
244423326164SRichard Henderson 
244523326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
244623326164SRichard Henderson        otherwise specified.  */
244723326164SRichard Henderson     if (access_size_max == 0) {
244823326164SRichard Henderson         access_size_max = 4;
244982f2563fSPaolo Bonzini     }
245023326164SRichard Henderson 
245123326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
245223326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
245323326164SRichard Henderson         unsigned align_size_max = addr & -addr;
245423326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
245523326164SRichard Henderson             access_size_max = align_size_max;
245623326164SRichard Henderson         }
245723326164SRichard Henderson     }
245823326164SRichard Henderson 
245923326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
246023326164SRichard Henderson     if (l > access_size_max) {
246123326164SRichard Henderson         l = access_size_max;
246223326164SRichard Henderson     }
24636554f5c0SPeter Maydell     l = pow2floor(l);
246423326164SRichard Henderson 
246523326164SRichard Henderson     return l;
246682f2563fSPaolo Bonzini }
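/* Editor's worked example for memory_access_size(): with a region that
 * allows up to 4-byte accesses, a request of l = 4 at addr = 0x1006 is
 * clipped to 2, because the lowest set bit of the address (addr & -addr)
 * is 2.  At an aligned addr = 0x1008 with l = 3, the alignment bound does
 * not apply and pow2floor(3) = 2 is used, so the 3-byte request becomes a
 * 2-byte access followed by a 1-byte access on the caller's next loop
 * iteration.
 */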
246782f2563fSPaolo Bonzini 
24684840f10eSJan Kiszka static bool prepare_mmio_access(MemoryRegion *mr)
2469125b3806SPaolo Bonzini {
24704840f10eSJan Kiszka     bool unlocked = !qemu_mutex_iothread_locked();
24714840f10eSJan Kiszka     bool release_lock = false;
24724840f10eSJan Kiszka 
24734840f10eSJan Kiszka     if (unlocked && mr->global_locking) {
24744840f10eSJan Kiszka         qemu_mutex_lock_iothread();
24754840f10eSJan Kiszka         unlocked = false;
24764840f10eSJan Kiszka         release_lock = true;
2477125b3806SPaolo Bonzini     }
24784840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
24794840f10eSJan Kiszka         if (unlocked) {
24804840f10eSJan Kiszka             qemu_mutex_lock_iothread();
24814840f10eSJan Kiszka         }
24824840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
24834840f10eSJan Kiszka         if (unlocked) {
24844840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
24854840f10eSJan Kiszka         }
24864840f10eSJan Kiszka     }
24874840f10eSJan Kiszka 
24884840f10eSJan Kiszka     return release_lock;
2489125b3806SPaolo Bonzini }
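/* Editor's sketch (illustrative, not part of physmem.c): the locking
 * pattern around an MMIO dispatch, mirroring how prepare_mmio_access()
 * is used by the accessors below.  demo_mmio_store32() is hypothetical.
 */
static MemTxResult demo_mmio_store32(MemoryRegion *mr, hwaddr addr,
                                     uint64_t val, MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);   /* may take the BQL */
    MemTxResult r = memory_region_dispatch_write(mr, addr, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}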
2490125b3806SPaolo Bonzini 
2491a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2492a203ac70SPaolo Bonzini static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2493a203ac70SPaolo Bonzini                                                 MemTxAttrs attrs,
2494a203ac70SPaolo Bonzini                                                 const uint8_t *buf,
2495a203ac70SPaolo Bonzini                                                 int len, hwaddr addr1,
2496a203ac70SPaolo Bonzini                                                 hwaddr l, MemoryRegion *mr)
249713eb76e0Sbellard {
249813eb76e0Sbellard     uint8_t *ptr;
2499791af8c8SPaolo Bonzini     uint64_t val;
25003b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
25014840f10eSJan Kiszka     bool release_lock = false;
250213eb76e0Sbellard 
2503a203ac70SPaolo Bonzini     for (;;) {
2504eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, true)) {
25054840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
25065c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
25074917cf44SAndreas Färber             /* XXX: could force current_cpu to NULL to avoid
25086a00d601Sbellard                potential bugs */
250923326164SRichard Henderson             switch (l) {
251023326164SRichard Henderson             case 8:
251123326164SRichard Henderson                 /* 64 bit write access */
251223326164SRichard Henderson                 val = ldq_p(buf);
25133b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 8,
25143b643495SPeter Maydell                                                        attrs);
251523326164SRichard Henderson                 break;
251623326164SRichard Henderson             case 4:
25171c213d19Sbellard                 /* 32 bit write access */
2518c27004ecSbellard                 val = ldl_p(buf);
25193b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 4,
25203b643495SPeter Maydell                                                        attrs);
252123326164SRichard Henderson                 break;
252223326164SRichard Henderson             case 2:
25231c213d19Sbellard                 /* 16 bit write access */
2524c27004ecSbellard                 val = lduw_p(buf);
25253b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 2,
25263b643495SPeter Maydell                                                        attrs);
252723326164SRichard Henderson                 break;
252823326164SRichard Henderson             case 1:
25291c213d19Sbellard                 /* 8 bit write access */
2530c27004ecSbellard                 val = ldub_p(buf);
25313b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 1,
25323b643495SPeter Maydell                                                        attrs);
253323326164SRichard Henderson                 break;
253423326164SRichard Henderson             default:
253523326164SRichard Henderson                 abort();
253613eb76e0Sbellard             }
25372bbfa05dSPaolo Bonzini         } else {
25385c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
253913eb76e0Sbellard             /* RAM case */
25405579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
254113eb76e0Sbellard             memcpy(ptr, buf, l);
2542845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, l);
25433a7d929eSbellard         }
2544eb7eeb88SPaolo Bonzini 
2545eb7eeb88SPaolo Bonzini         if (release_lock) {
2546eb7eeb88SPaolo Bonzini             qemu_mutex_unlock_iothread();
2547eb7eeb88SPaolo Bonzini             release_lock = false;
2548eb7eeb88SPaolo Bonzini         }
2549eb7eeb88SPaolo Bonzini 
2550eb7eeb88SPaolo Bonzini         len -= l;
2551eb7eeb88SPaolo Bonzini         buf += l;
2552eb7eeb88SPaolo Bonzini         addr += l;
2553a203ac70SPaolo Bonzini 
2554a203ac70SPaolo Bonzini         if (!len) {
2555a203ac70SPaolo Bonzini             break;
2556eb7eeb88SPaolo Bonzini         }
2557a203ac70SPaolo Bonzini 
2558a203ac70SPaolo Bonzini         l = len;
2559a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2560a203ac70SPaolo Bonzini     }
2561eb7eeb88SPaolo Bonzini 
2562eb7eeb88SPaolo Bonzini     return result;
2563eb7eeb88SPaolo Bonzini }
2564eb7eeb88SPaolo Bonzini 
2565a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2566a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2567eb7eeb88SPaolo Bonzini {
2568eb7eeb88SPaolo Bonzini     hwaddr l;
2569eb7eeb88SPaolo Bonzini     hwaddr addr1;
2570eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2571eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2572a203ac70SPaolo Bonzini 
2573a203ac70SPaolo Bonzini     if (len > 0) {
2574a203ac70SPaolo Bonzini         rcu_read_lock();
2575a203ac70SPaolo Bonzini         l = len;
2576a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2577a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2578a203ac70SPaolo Bonzini                                               addr1, l, mr);
2579a203ac70SPaolo Bonzini         rcu_read_unlock();
2580a203ac70SPaolo Bonzini     }
2581a203ac70SPaolo Bonzini 
2582a203ac70SPaolo Bonzini     return result;
2583a203ac70SPaolo Bonzini }
2584a203ac70SPaolo Bonzini 
2585a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2586a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2587a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
2588a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
2589a203ac70SPaolo Bonzini                                         MemoryRegion *mr)
2590a203ac70SPaolo Bonzini {
2591a203ac70SPaolo Bonzini     uint8_t *ptr;
2592a203ac70SPaolo Bonzini     uint64_t val;
2593a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2594eb7eeb88SPaolo Bonzini     bool release_lock = false;
2595eb7eeb88SPaolo Bonzini 
2596a203ac70SPaolo Bonzini     for (;;) {
2597eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, false)) {
259813eb76e0Sbellard             /* I/O case */
25994840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
26005c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
260123326164SRichard Henderson             switch (l) {
260223326164SRichard Henderson             case 8:
260323326164SRichard Henderson                 /* 64 bit read access */
26043b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
26053b643495SPeter Maydell                                                       attrs);
260623326164SRichard Henderson                 stq_p(buf, val);
260723326164SRichard Henderson                 break;
260823326164SRichard Henderson             case 4:
260913eb76e0Sbellard                 /* 32 bit read access */
26103b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
26113b643495SPeter Maydell                                                       attrs);
2612c27004ecSbellard                 stl_p(buf, val);
261323326164SRichard Henderson                 break;
261423326164SRichard Henderson             case 2:
261513eb76e0Sbellard                 /* 16 bit read access */
26163b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
26173b643495SPeter Maydell                                                       attrs);
2618c27004ecSbellard                 stw_p(buf, val);
261923326164SRichard Henderson                 break;
262023326164SRichard Henderson             case 1:
26211c213d19Sbellard                 /* 8 bit read access */
26223b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
26233b643495SPeter Maydell                                                       attrs);
2624c27004ecSbellard                 stb_p(buf, val);
262523326164SRichard Henderson                 break;
262623326164SRichard Henderson             default:
262723326164SRichard Henderson                 abort();
262813eb76e0Sbellard             }
262913eb76e0Sbellard         } else {
263013eb76e0Sbellard             /* RAM case */
26315c8a00ceSPaolo Bonzini             ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2632f3705d53SAvi Kivity             memcpy(buf, ptr, l);
263313eb76e0Sbellard         }
26344840f10eSJan Kiszka 
26354840f10eSJan Kiszka         if (release_lock) {
26364840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
26374840f10eSJan Kiszka             release_lock = false;
26384840f10eSJan Kiszka         }
26394840f10eSJan Kiszka 
264013eb76e0Sbellard         len -= l;
264113eb76e0Sbellard         buf += l;
264213eb76e0Sbellard         addr += l;
2643a203ac70SPaolo Bonzini 
2644a203ac70SPaolo Bonzini         if (!len) {
2645a203ac70SPaolo Bonzini             break;
264613eb76e0Sbellard         }
2647a203ac70SPaolo Bonzini 
2648a203ac70SPaolo Bonzini         l = len;
2649a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2650a203ac70SPaolo Bonzini     }
2651a203ac70SPaolo Bonzini 
2652a203ac70SPaolo Bonzini     return result;
2653a203ac70SPaolo Bonzini }
2654a203ac70SPaolo Bonzini 
26553cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
26563cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2657a203ac70SPaolo Bonzini {
2658a203ac70SPaolo Bonzini     hwaddr l;
2659a203ac70SPaolo Bonzini     hwaddr addr1;
2660a203ac70SPaolo Bonzini     MemoryRegion *mr;
2661a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2662a203ac70SPaolo Bonzini 
2663a203ac70SPaolo Bonzini     if (len > 0) {
2664a203ac70SPaolo Bonzini         rcu_read_lock();
2665a203ac70SPaolo Bonzini         l = len;
2666a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2667a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2668a203ac70SPaolo Bonzini                                              addr1, l, mr);
266941063e1eSPaolo Bonzini         rcu_read_unlock();
2670a203ac70SPaolo Bonzini     }
2671fd8aaa76SPaolo Bonzini 
26723b643495SPeter Maydell     return result;
267313eb76e0Sbellard }
26748df1cd07Sbellard 
2675eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2676eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2677ac1970fbSAvi Kivity {
2678eb7eeb88SPaolo Bonzini     if (is_write) {
2679eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2680eb7eeb88SPaolo Bonzini     } else {
2681eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2682ac1970fbSAvi Kivity     }
2683ac1970fbSAvi Kivity }
2684ac1970fbSAvi Kivity 
2685a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2686ac1970fbSAvi Kivity                             int len, int is_write)
2687ac1970fbSAvi Kivity {
26885c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
26895c9eb028SPeter Maydell                      buf, len, is_write);
2690ac1970fbSAvi Kivity }
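/* Editor's usage sketch (hypothetical address and payload): a round trip
 * through the convenience wrapper above, which always targets
 * address_space_memory with MEMTXATTRS_UNSPECIFIED.
 */
static void demo_guest_round_trip(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   /* write */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   /* read back */
}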
2691ac1970fbSAvi Kivity 
2692582b55a9SAlexander Graf enum write_rom_type {
2693582b55a9SAlexander Graf     WRITE_DATA,
2694582b55a9SAlexander Graf     FLUSH_CACHE,
2695582b55a9SAlexander Graf };
2696582b55a9SAlexander Graf 
26972a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2698582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2699d0ecd2aaSbellard {
2700149f54b5SPaolo Bonzini     hwaddr l;
2701d0ecd2aaSbellard     uint8_t *ptr;
2702149f54b5SPaolo Bonzini     hwaddr addr1;
27035c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2704d0ecd2aaSbellard 
270541063e1eSPaolo Bonzini     rcu_read_lock();
2706d0ecd2aaSbellard     while (len > 0) {
2707d0ecd2aaSbellard         l = len;
27082a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2709d0ecd2aaSbellard 
27105c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
27115c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2712b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
2713d0ecd2aaSbellard         } else {
27145c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2715d0ecd2aaSbellard             /* ROM/RAM case */
27165579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2717582b55a9SAlexander Graf             switch (type) {
2718582b55a9SAlexander Graf             case WRITE_DATA:
2719d0ecd2aaSbellard                 memcpy(ptr, buf, l);
2720845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
2721582b55a9SAlexander Graf                 break;
2722582b55a9SAlexander Graf             case FLUSH_CACHE:
2723582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2724582b55a9SAlexander Graf                 break;
2725582b55a9SAlexander Graf             }
2726d0ecd2aaSbellard         }
2727d0ecd2aaSbellard         len -= l;
2728d0ecd2aaSbellard         buf += l;
2729d0ecd2aaSbellard         addr += l;
2730d0ecd2aaSbellard     }
273141063e1eSPaolo Bonzini     rcu_read_unlock();
2732d0ecd2aaSbellard }
2733d0ecd2aaSbellard 
2734582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
27352a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2736582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2737582b55a9SAlexander Graf {
27382a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2739582b55a9SAlexander Graf }
2740582b55a9SAlexander Graf 
2741582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2742582b55a9SAlexander Graf {
2743582b55a9SAlexander Graf     /*
2744582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2745582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2746582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2747582b55a9SAlexander Graf      * the host's instruction cache at least.
2748582b55a9SAlexander Graf      */
2749582b55a9SAlexander Graf     if (tcg_enabled()) {
2750582b55a9SAlexander Graf         return;
2751582b55a9SAlexander Graf     }
2752582b55a9SAlexander Graf 
27532a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
27542a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2755582b55a9SAlexander Graf }
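/* Editor's usage sketch (hypothetical blob and load address 0): loading
 * firmware through the ROM writer above and then making the host
 * instruction cache coherent, which matters for KVM/Xen but is a no-op
 * under TCG.
 */
static void demo_load_firmware(const uint8_t *blob, int len)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0x0, blob, len);
    cpu_flush_icache_range(0x0, len);
}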
2756582b55a9SAlexander Graf 
27576d16c2f8Saliguori typedef struct {
2758d3e71559SPaolo Bonzini     MemoryRegion *mr;
27596d16c2f8Saliguori     void *buffer;
2760a8170e5eSAvi Kivity     hwaddr addr;
2761a8170e5eSAvi Kivity     hwaddr len;
2762c2cba0ffSFam Zheng     bool in_use;
27636d16c2f8Saliguori } BounceBuffer;
27646d16c2f8Saliguori 
27656d16c2f8Saliguori static BounceBuffer bounce;
27666d16c2f8Saliguori 
2767ba223c29Saliguori typedef struct MapClient {
2768e95205e1SFam Zheng     QEMUBH *bh;
276972cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2770ba223c29Saliguori } MapClient;
2771ba223c29Saliguori 
277238e047b5SFam Zheng QemuMutex map_client_list_lock;
277372cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
277472cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2775ba223c29Saliguori 
2776e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2777ba223c29Saliguori {
277872cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
27797267c094SAnthony Liguori     g_free(client);
2780ba223c29Saliguori }
2781ba223c29Saliguori 
278233b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2783ba223c29Saliguori {
2784ba223c29Saliguori     MapClient *client;
2785ba223c29Saliguori 
278672cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
278772cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2788e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2789e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2790ba223c29Saliguori     }
2791ba223c29Saliguori }
2792ba223c29Saliguori 
2793e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2794d0ecd2aaSbellard {
2795d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2796d0ecd2aaSbellard 
279738e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2798e95205e1SFam Zheng     client->bh = bh;
2799d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
280033b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
280133b6c2edSFam Zheng         cpu_notify_map_clients_locked();
280233b6c2edSFam Zheng     }
280338e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2804d0ecd2aaSbellard }
2805d0ecd2aaSbellard 
280638e047b5SFam Zheng void cpu_exec_init_all(void)
280738e047b5SFam Zheng {
280838e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
280938e047b5SFam Zheng     io_mem_init();
2810680a4783SPaolo Bonzini     memory_map_init();
281138e047b5SFam Zheng     qemu_mutex_init(&map_client_list_lock);
281238e047b5SFam Zheng }
281338e047b5SFam Zheng 
2814e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2815d0ecd2aaSbellard {
2816e95205e1SFam Zheng     MapClient *client;
2817d0ecd2aaSbellard 
2818e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2819e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2820e95205e1SFam Zheng         if (client->bh == bh) {
2821e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2822e95205e1SFam Zheng             break;
2823e95205e1SFam Zheng         }
2824e95205e1SFam Zheng     }
2825e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2826d0ecd2aaSbellard }
2827d0ecd2aaSbellard 
2828d0ecd2aaSbellard static void cpu_notify_map_clients(void)
2829d0ecd2aaSbellard {
283038e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
283133b6c2edSFam Zheng     cpu_notify_map_clients_locked();
283238e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
28336d16c2f8Saliguori }
28346d16c2f8Saliguori 
283551644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
283651644ab7SPaolo Bonzini {
28375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
283851644ab7SPaolo Bonzini     hwaddr l, xlat;
283951644ab7SPaolo Bonzini 
284041063e1eSPaolo Bonzini     rcu_read_lock();
284151644ab7SPaolo Bonzini     while (len > 0) {
284251644ab7SPaolo Bonzini         l = len;
28435c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
28445c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
28455c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
28465c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
284751644ab7SPaolo Bonzini                 rcu_read_unlock();
284751644ab7SPaolo Bonzini                 return false;
284851644ab7SPaolo Bonzini             }
284951644ab7SPaolo Bonzini         }
285051644ab7SPaolo Bonzini 
285151644ab7SPaolo Bonzini         len -= l;
285251644ab7SPaolo Bonzini         addr += l;
285351644ab7SPaolo Bonzini     }
285441063e1eSPaolo Bonzini     rcu_read_unlock();
285551644ab7SPaolo Bonzini     return true;
285651644ab7SPaolo Bonzini }
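/* Editor's usage sketch (hypothetical address and length): probing a
 * range with the validity check above before committing to a large
 * transfer.
 */
static bool demo_range_is_writable(AddressSpace *as)
{
    return address_space_access_valid(as, 0x40000000, 4096, true);
}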
285751644ab7SPaolo Bonzini 
28586d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
28596d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
28606d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
28616d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2862ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2863ba223c29Saliguori  * likely to succeed.
28646d16c2f8Saliguori  */
2865ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2866a8170e5eSAvi Kivity                         hwaddr addr,
2867a8170e5eSAvi Kivity                         hwaddr *plen,
2868ac1970fbSAvi Kivity                         bool is_write)
28696d16c2f8Saliguori {
2870a8170e5eSAvi Kivity     hwaddr len = *plen;
2871e3127ae0SPaolo Bonzini     hwaddr done = 0;
2872e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2873e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2874e3127ae0SPaolo Bonzini     ram_addr_t raddr;
2875e81bcda5SPaolo Bonzini     void *ptr;
28766d16c2f8Saliguori 
2877e3127ae0SPaolo Bonzini     if (len == 0) {
2878e3127ae0SPaolo Bonzini         return NULL;
2879e3127ae0SPaolo Bonzini     }
2880e3127ae0SPaolo Bonzini 
28816d16c2f8Saliguori     l = len;
288241063e1eSPaolo Bonzini     rcu_read_lock();
28835c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
288441063e1eSPaolo Bonzini 
28855c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2886c2cba0ffSFam Zheng         if (atomic_xchg(&bounce.in_use, true)) {
288741063e1eSPaolo Bonzini             rcu_read_unlock();
2888e3127ae0SPaolo Bonzini             return NULL;
28896d16c2f8Saliguori         }
2890e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2891e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2892e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
28936d16c2f8Saliguori         bounce.addr = addr;
28946d16c2f8Saliguori         bounce.len = l;
2895d3e71559SPaolo Bonzini 
2896d3e71559SPaolo Bonzini         memory_region_ref(mr);
2897d3e71559SPaolo Bonzini         bounce.mr = mr;
28986d16c2f8Saliguori         if (!is_write) {
28995c9eb028SPeter Maydell             address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
29005c9eb028SPeter Maydell                                bounce.buffer, l);
29016d16c2f8Saliguori         }
290238bee5dcSStefano Stabellini 
290341063e1eSPaolo Bonzini         rcu_read_unlock();
290438bee5dcSStefano Stabellini         *plen = l;
290538bee5dcSStefano Stabellini         return bounce.buffer;
29066d16c2f8Saliguori     }
2907e3127ae0SPaolo Bonzini 
2908e3127ae0SPaolo Bonzini     base = xlat;
2909e3127ae0SPaolo Bonzini     raddr = memory_region_get_ram_addr(mr);
2910e3127ae0SPaolo Bonzini 
2911e3127ae0SPaolo Bonzini     for (;;) {
2912e3127ae0SPaolo Bonzini         len -= l;
2913e3127ae0SPaolo Bonzini         addr += l;
2914e3127ae0SPaolo Bonzini         done += l;
2915e3127ae0SPaolo Bonzini         if (len == 0) {
2916e3127ae0SPaolo Bonzini             break;
2917e3127ae0SPaolo Bonzini         }
2918e3127ae0SPaolo Bonzini 
2919e3127ae0SPaolo Bonzini         l = len;
2920e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2921e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2922149f54b5SPaolo Bonzini             break;
2923149f54b5SPaolo Bonzini         }
29248ab934f9SStefano Stabellini     }
29256d16c2f8Saliguori 
2926d3e71559SPaolo Bonzini     memory_region_ref(mr);
2927e3127ae0SPaolo Bonzini     *plen = done;
2928e81bcda5SPaolo Bonzini     ptr = qemu_ram_ptr_length(raddr + base, plen);
2929e81bcda5SPaolo Bonzini     rcu_read_unlock();
2930e81bcda5SPaolo Bonzini 
2931e81bcda5SPaolo Bonzini     return ptr;
29326d16c2f8Saliguori }
29336d16c2f8Saliguori 
2934ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
29356d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
29366d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
29376d16c2f8Saliguori  */
2938a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2939a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
29406d16c2f8Saliguori {
29416d16c2f8Saliguori     if (buffer != bounce.buffer) {
2942d3e71559SPaolo Bonzini         MemoryRegion *mr;
29437443b437SPaolo Bonzini         ram_addr_t addr1;
2944d3e71559SPaolo Bonzini 
2945d3e71559SPaolo Bonzini         mr = qemu_ram_addr_from_host(buffer, &addr1);
29461b5ec234SPaolo Bonzini         assert(mr != NULL);
2947d3e71559SPaolo Bonzini         if (is_write) {
2948845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, access_len);
29496d16c2f8Saliguori         }
2950868bb33fSJan Kiszka         if (xen_enabled()) {
2951e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2952050a0ddfSAnthony PERARD         }
2953d3e71559SPaolo Bonzini         memory_region_unref(mr);
29546d16c2f8Saliguori         return;
29556d16c2f8Saliguori     }
29566d16c2f8Saliguori     if (is_write) {
29575c9eb028SPeter Maydell         address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
29585c9eb028SPeter Maydell                             bounce.buffer, access_len);
29596d16c2f8Saliguori     }
2960f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
29616d16c2f8Saliguori     bounce.buffer = NULL;
2962d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2963c2cba0ffSFam Zheng     atomic_mb_set(&bounce.in_use, false);
2964ba223c29Saliguori     cpu_notify_map_clients();
29656d16c2f8Saliguori }
2966d0ecd2aaSbellard 
2967a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2968a8170e5eSAvi Kivity                               hwaddr *plen,
2969ac1970fbSAvi Kivity                               int is_write)
2970ac1970fbSAvi Kivity {
2971ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2972ac1970fbSAvi Kivity }
2973ac1970fbSAvi Kivity 
2974a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2975a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2976ac1970fbSAvi Kivity {
2977ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2978ac1970fbSAvi Kivity }
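/* Editor's sketch (illustrative, not part of physmem.c): the map/unmap
 * pattern used by zero-copy DMA code such as dma-helpers.c.  All demo_*
 * names and the retry policy are hypothetical.  If address_space_map()
 * fails because the single bounce buffer is busy, the caller registers a
 * bottom half with cpu_register_map_client() and retries when it runs.
 */
static void demo_dma_retry_bh(void *opaque)
{
    /* The bounce buffer was released; restart demo_dma_read() here.  */
}

static void demo_dma_read(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, false);

    if (!p) {
        cpu_register_map_client(qemu_bh_new(demo_dma_retry_bh, NULL));
        return;
    }
    /* ... consume plen bytes at p; plen may be shorter than len ... */
    address_space_unmap(as, p, plen, false, plen);
}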
2979ac1970fbSAvi Kivity 
29808df1cd07Sbellard /* warning: addr must be aligned */
298150013115SPeter Maydell static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
298250013115SPeter Maydell                                                   MemTxAttrs attrs,
298350013115SPeter Maydell                                                   MemTxResult *result,
29841e78bcc1SAlexander Graf                                                   enum device_endian endian)
29858df1cd07Sbellard {
29868df1cd07Sbellard     uint8_t *ptr;
2987791af8c8SPaolo Bonzini     uint64_t val;
29885c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2989149f54b5SPaolo Bonzini     hwaddr l = 4;
2990149f54b5SPaolo Bonzini     hwaddr addr1;
299150013115SPeter Maydell     MemTxResult r;
29924840f10eSJan Kiszka     bool release_lock = false;
29938df1cd07Sbellard 
299441063e1eSPaolo Bonzini     rcu_read_lock();
2995fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
29965c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
29974840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
2998125b3806SPaolo Bonzini 
29998df1cd07Sbellard         /* I/O case */
300050013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
30011e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
30021e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
30031e78bcc1SAlexander Graf             val = bswap32(val);
30041e78bcc1SAlexander Graf         }
30051e78bcc1SAlexander Graf #else
30061e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
30071e78bcc1SAlexander Graf             val = bswap32(val);
30081e78bcc1SAlexander Graf         }
30091e78bcc1SAlexander Graf #endif
30108df1cd07Sbellard     } else {
30118df1cd07Sbellard         /* RAM case */
30125c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
301306ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3014149f54b5SPaolo Bonzini                                + addr1);
30151e78bcc1SAlexander Graf         switch (endian) {
30161e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
30171e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
30181e78bcc1SAlexander Graf             break;
30191e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
30201e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
30211e78bcc1SAlexander Graf             break;
30221e78bcc1SAlexander Graf         default:
30238df1cd07Sbellard             val = ldl_p(ptr);
30241e78bcc1SAlexander Graf             break;
30251e78bcc1SAlexander Graf         }
302650013115SPeter Maydell         r = MEMTX_OK;
302750013115SPeter Maydell     }
302850013115SPeter Maydell     if (result) {
302950013115SPeter Maydell         *result = r;
30308df1cd07Sbellard     }
30314840f10eSJan Kiszka     if (release_lock) {
30324840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
30334840f10eSJan Kiszka     }
303441063e1eSPaolo Bonzini     rcu_read_unlock();
30358df1cd07Sbellard     return val;
30368df1cd07Sbellard }
30378df1cd07Sbellard 
303850013115SPeter Maydell uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
303950013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
304050013115SPeter Maydell {
304150013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
304250013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
304350013115SPeter Maydell }
304450013115SPeter Maydell 
304550013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
304650013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
304750013115SPeter Maydell {
304850013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
304950013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
305050013115SPeter Maydell }
305150013115SPeter Maydell 
305250013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
305350013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
305450013115SPeter Maydell {
305550013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
305650013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
305750013115SPeter Maydell }
305850013115SPeter Maydell 
3059fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
30601e78bcc1SAlexander Graf {
306150013115SPeter Maydell     return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30621e78bcc1SAlexander Graf }
30631e78bcc1SAlexander Graf 
3064fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
30651e78bcc1SAlexander Graf {
306650013115SPeter Maydell     return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30671e78bcc1SAlexander Graf }
30681e78bcc1SAlexander Graf 
3069fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
30701e78bcc1SAlexander Graf {
307150013115SPeter Maydell     return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30721e78bcc1SAlexander Graf }
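/* Editor's usage sketch (hypothetical register address): reading a
 * little-endian 32-bit register while checking the transaction result,
 * which the bare ldl_*_phys() wrappers above discard.
 */
static uint32_t demo_read_reg32(AddressSpace *as)
{
    MemTxResult r;
    uint32_t val = address_space_ldl_le(as, 0xfe000000,
                                        MEMTXATTRS_UNSPECIFIED, &r);

    return r == MEMTX_OK ? val : 0xffffffff;   /* all-ones on bus error */
}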
30731e78bcc1SAlexander Graf 
307484b7b8e7Sbellard /* warning: addr must be aligned */
307550013115SPeter Maydell static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
307650013115SPeter Maydell                                                   MemTxAttrs attrs,
307750013115SPeter Maydell                                                   MemTxResult *result,
30781e78bcc1SAlexander Graf                                                   enum device_endian endian)
307984b7b8e7Sbellard {
308084b7b8e7Sbellard     uint8_t *ptr;
308184b7b8e7Sbellard     uint64_t val;
30825c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3083149f54b5SPaolo Bonzini     hwaddr l = 8;
3084149f54b5SPaolo Bonzini     hwaddr addr1;
308550013115SPeter Maydell     MemTxResult r;
30864840f10eSJan Kiszka     bool release_lock = false;
308784b7b8e7Sbellard 
308841063e1eSPaolo Bonzini     rcu_read_lock();
30892c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3090149f54b5SPaolo Bonzini                                  false);
30915c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
30924840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3093125b3806SPaolo Bonzini 
309484b7b8e7Sbellard         /* I/O case */
309550013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3096968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
3097968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
3098968a5627SPaolo Bonzini             val = bswap64(val);
3099968a5627SPaolo Bonzini         }
3100968a5627SPaolo Bonzini #else
3101968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
3102968a5627SPaolo Bonzini             val = bswap64(val);
3103968a5627SPaolo Bonzini         }
3104968a5627SPaolo Bonzini #endif
310584b7b8e7Sbellard     } else {
310684b7b8e7Sbellard         /* RAM case */
31075c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
310806ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3109149f54b5SPaolo Bonzini                                + addr1);
31101e78bcc1SAlexander Graf         switch (endian) {
31111e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
31121e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
31131e78bcc1SAlexander Graf             break;
31141e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
31151e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
31161e78bcc1SAlexander Graf             break;
31171e78bcc1SAlexander Graf         default:
311884b7b8e7Sbellard             val = ldq_p(ptr);
31191e78bcc1SAlexander Graf             break;
31201e78bcc1SAlexander Graf         }
312150013115SPeter Maydell         r = MEMTX_OK;
312250013115SPeter Maydell     }
312350013115SPeter Maydell     if (result) {
312450013115SPeter Maydell         *result = r;
312584b7b8e7Sbellard     }
31264840f10eSJan Kiszka     if (release_lock) {
31274840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
31284840f10eSJan Kiszka     }
312941063e1eSPaolo Bonzini     rcu_read_unlock();
313084b7b8e7Sbellard     return val;
313184b7b8e7Sbellard }
313284b7b8e7Sbellard 
313350013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
313450013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
313550013115SPeter Maydell {
313650013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
313750013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
313850013115SPeter Maydell }
313950013115SPeter Maydell 
314050013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
314150013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
314250013115SPeter Maydell {
314350013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
314450013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
314550013115SPeter Maydell }
314650013115SPeter Maydell 
314750013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
314850013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
314950013115SPeter Maydell {
315050013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
315150013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
315250013115SPeter Maydell }
315350013115SPeter Maydell 
31542c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
31551e78bcc1SAlexander Graf {
315650013115SPeter Maydell     return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31571e78bcc1SAlexander Graf }
31581e78bcc1SAlexander Graf 
31592c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
31601e78bcc1SAlexander Graf {
316150013115SPeter Maydell     return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31621e78bcc1SAlexander Graf }
31631e78bcc1SAlexander Graf 
31642c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
31651e78bcc1SAlexander Graf {
316650013115SPeter Maydell     return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31671e78bcc1SAlexander Graf }
31681e78bcc1SAlexander Graf 
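/*
 * Sketch of a 64-bit load that checks the memory transaction result
 * instead of discarding it the way the ldq_*_phys() wrappers do; the
 * caller-supplied address space and address are assumptions.
 */
static inline bool example_ldq_checked(AddressSpace *as, hwaddr addr,
                                       uint64_t *val)
{
    MemTxResult r;

    *val = address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, &r);
    return r == MEMTX_OK;
}
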
3169aab33094Sbellard /* XXX: optimize */
317050013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
317150013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
3172aab33094Sbellard {
3173aab33094Sbellard     uint8_t val;
317450013115SPeter Maydell     MemTxResult r;
317550013115SPeter Maydell 
317650013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
317750013115SPeter Maydell     if (result) {
317850013115SPeter Maydell         *result = r;
317950013115SPeter Maydell     }
3180aab33094Sbellard     return val;
3181aab33094Sbellard }
3182aab33094Sbellard 
318350013115SPeter Maydell uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
318450013115SPeter Maydell {
318550013115SPeter Maydell     return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
318650013115SPeter Maydell }
318750013115SPeter Maydell 
3188733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
318950013115SPeter Maydell static inline uint32_t address_space_lduw_internal(AddressSpace *as,
319050013115SPeter Maydell                                                    hwaddr addr,
319150013115SPeter Maydell                                                    MemTxAttrs attrs,
319250013115SPeter Maydell                                                    MemTxResult *result,
31931e78bcc1SAlexander Graf                                                    enum device_endian endian)
3194aab33094Sbellard {
3195733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3196733f0b02SMichael S. Tsirkin     uint64_t val;
31975c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3198149f54b5SPaolo Bonzini     hwaddr l = 2;
3199149f54b5SPaolo Bonzini     hwaddr addr1;
320050013115SPeter Maydell     MemTxResult r;
32014840f10eSJan Kiszka     bool release_lock = false;
3202733f0b02SMichael S. Tsirkin 
320341063e1eSPaolo Bonzini     rcu_read_lock();
320441701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3205149f54b5SPaolo Bonzini                                  false);
32065c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
32074840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3208125b3806SPaolo Bonzini 
3209733f0b02SMichael S. Tsirkin         /* I/O case */
321050013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
32111e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
32121e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
32131e78bcc1SAlexander Graf             val = bswap16(val);
32141e78bcc1SAlexander Graf         }
32151e78bcc1SAlexander Graf #else
32161e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
32171e78bcc1SAlexander Graf             val = bswap16(val);
32181e78bcc1SAlexander Graf         }
32191e78bcc1SAlexander Graf #endif
3220733f0b02SMichael S. Tsirkin     } else {
3221733f0b02SMichael S. Tsirkin         /* RAM case */
32225c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
322306ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3224149f54b5SPaolo Bonzini                                + addr1);
32251e78bcc1SAlexander Graf         switch (endian) {
32261e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
32271e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
32281e78bcc1SAlexander Graf             break;
32291e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
32301e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
32311e78bcc1SAlexander Graf             break;
32321e78bcc1SAlexander Graf         default:
3233733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
32341e78bcc1SAlexander Graf             break;
32351e78bcc1SAlexander Graf         }
323650013115SPeter Maydell         r = MEMTX_OK;
323750013115SPeter Maydell     }
323850013115SPeter Maydell     if (result) {
323950013115SPeter Maydell         *result = r;
3240733f0b02SMichael S. Tsirkin     }
32414840f10eSJan Kiszka     if (release_lock) {
32424840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
32434840f10eSJan Kiszka     }
324441063e1eSPaolo Bonzini     rcu_read_unlock();
3245733f0b02SMichael S. Tsirkin     return val;
3246aab33094Sbellard }
3247aab33094Sbellard 
324850013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
324950013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
325050013115SPeter Maydell {
325150013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
325250013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
325350013115SPeter Maydell }
325450013115SPeter Maydell 
325550013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
325650013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
325750013115SPeter Maydell {
325850013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
325950013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
326050013115SPeter Maydell }
326150013115SPeter Maydell 
326250013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
326350013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
326450013115SPeter Maydell {
326550013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
326650013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
326750013115SPeter Maydell }
326850013115SPeter Maydell 
326941701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
32701e78bcc1SAlexander Graf {
327150013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32721e78bcc1SAlexander Graf }
32731e78bcc1SAlexander Graf 
327441701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
32751e78bcc1SAlexander Graf {
327650013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32771e78bcc1SAlexander Graf }
32781e78bcc1SAlexander Graf 
327941701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
32801e78bcc1SAlexander Graf {
328150013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32821e78bcc1SAlexander Graf }
32831e78bcc1SAlexander Graf 
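/*
 * Sketch: reading a 16-bit big-endian field, e.g. a network-byte-order
 * length word in a descriptor that a NIC model shares with the guest.
 * The +2 offset is a hypothetical layout detail.
 */
static inline uint32_t example_read_be16_len(AddressSpace *as, hwaddr desc)
{
    return lduw_be_phys(as, desc + 2);
}
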
32848df1cd07Sbellard /* warning: addr must be aligned. The RAM page is not marked as dirty
32858df1cd07Sbellard    and the code inside is not invalidated. This is useful when the
32868df1cd07Sbellard    dirty bits are used to track modified PTEs. */
328750013115SPeter Maydell void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
328850013115SPeter Maydell                                 MemTxAttrs attrs, MemTxResult *result)
32898df1cd07Sbellard {
32908df1cd07Sbellard     uint8_t *ptr;
32915c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3292149f54b5SPaolo Bonzini     hwaddr l = 4;
3293149f54b5SPaolo Bonzini     hwaddr addr1;
329450013115SPeter Maydell     MemTxResult r;
3295845b6214SPaolo Bonzini     uint8_t dirty_log_mask;
32964840f10eSJan Kiszka     bool release_lock = false;
32978df1cd07Sbellard 
329841063e1eSPaolo Bonzini     rcu_read_lock();
32992198a121SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3300149f54b5SPaolo Bonzini                                  true);
33015c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
33024840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3303125b3806SPaolo Bonzini 
330450013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33058df1cd07Sbellard     } else {
33065c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
33075579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
33088df1cd07Sbellard         stl_p(ptr, val);
330974576198Saliguori 
3310845b6214SPaolo Bonzini         dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3311845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
331258d2707eSPaolo Bonzini         cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
331350013115SPeter Maydell         r = MEMTX_OK;
331450013115SPeter Maydell     }
331550013115SPeter Maydell     if (result) {
331650013115SPeter Maydell         *result = r;
33178df1cd07Sbellard     }
33184840f10eSJan Kiszka     if (release_lock) {
33194840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
33204840f10eSJan Kiszka     }
332141063e1eSPaolo Bonzini     rcu_read_unlock();
33228df1cd07Sbellard }
33238df1cd07Sbellard 
332450013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
332550013115SPeter Maydell {
332650013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
332750013115SPeter Maydell }
332850013115SPeter Maydell 
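/*
 * Sketch of the intended caller of stl_phys_notdirty(): a softmmu page
 * table walker setting an accessed bit in a guest PTE.  Because
 * DIRTY_MEMORY_CODE is masked, the store does not invalidate translated
 * code on that page.  The PTE bit value here is hypothetical.
 */
static inline void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical A bit */);
}
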
33298df1cd07Sbellard /* warning: addr must be aligned */
333050013115SPeter Maydell static inline void address_space_stl_internal(AddressSpace *as,
3331ab1da857SEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
333250013115SPeter Maydell                                               MemTxAttrs attrs,
333350013115SPeter Maydell                                               MemTxResult *result,
33341e78bcc1SAlexander Graf                                               enum device_endian endian)
33358df1cd07Sbellard {
33368df1cd07Sbellard     uint8_t *ptr;
33375c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3338149f54b5SPaolo Bonzini     hwaddr l = 4;
3339149f54b5SPaolo Bonzini     hwaddr addr1;
334050013115SPeter Maydell     MemTxResult r;
33414840f10eSJan Kiszka     bool release_lock = false;
33428df1cd07Sbellard 
334341063e1eSPaolo Bonzini     rcu_read_lock();
3344ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3345149f54b5SPaolo Bonzini                                  true);
33465c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
33474840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3348125b3806SPaolo Bonzini 
33491e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
33501e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
33511e78bcc1SAlexander Graf             val = bswap32(val);
33521e78bcc1SAlexander Graf         }
33531e78bcc1SAlexander Graf #else
33541e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
33551e78bcc1SAlexander Graf             val = bswap32(val);
33561e78bcc1SAlexander Graf         }
33571e78bcc1SAlexander Graf #endif
335850013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33598df1cd07Sbellard     } else {
33608df1cd07Sbellard         /* RAM case */
33615c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
33625579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
33631e78bcc1SAlexander Graf         switch (endian) {
33641e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
33651e78bcc1SAlexander Graf             stl_le_p(ptr, val);
33661e78bcc1SAlexander Graf             break;
33671e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
33681e78bcc1SAlexander Graf             stl_be_p(ptr, val);
33691e78bcc1SAlexander Graf             break;
33701e78bcc1SAlexander Graf         default:
33718df1cd07Sbellard             stl_p(ptr, val);
33721e78bcc1SAlexander Graf             break;
33731e78bcc1SAlexander Graf         }
3374845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 4);
337550013115SPeter Maydell         r = MEMTX_OK;
33768df1cd07Sbellard     }
337750013115SPeter Maydell     if (result) {
337850013115SPeter Maydell         *result = r;
337950013115SPeter Maydell     }
33804840f10eSJan Kiszka     if (release_lock) {
33814840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
33824840f10eSJan Kiszka     }
338341063e1eSPaolo Bonzini     rcu_read_unlock();
338450013115SPeter Maydell }
338550013115SPeter Maydell 
338650013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
338750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
338850013115SPeter Maydell {
338950013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
339050013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
339150013115SPeter Maydell }
339250013115SPeter Maydell 
339350013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
339450013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
339550013115SPeter Maydell {
339650013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
339750013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
339850013115SPeter Maydell }
339950013115SPeter Maydell 
340050013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
340150013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
340250013115SPeter Maydell {
340350013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
340450013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
34053a7d929eSbellard }
34068df1cd07Sbellard 
3407ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34081e78bcc1SAlexander Graf {
340950013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34101e78bcc1SAlexander Graf }
34111e78bcc1SAlexander Graf 
3412ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34131e78bcc1SAlexander Graf {
341450013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34151e78bcc1SAlexander Graf }
34161e78bcc1SAlexander Graf 
3417ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34181e78bcc1SAlexander Graf {
341950013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34201e78bcc1SAlexander Graf }
34211e78bcc1SAlexander Graf 
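/*
 * Sketch of a store that passes explicit memory transaction attributes
 * rather than MEMTXATTRS_UNSPECIFIED, and checks the result; here a
 * hypothetical TrustZone-aware caller issues a secure access.
 */
static inline bool example_secure_stl(AddressSpace *as, hwaddr addr,
                                      uint32_t val)
{
    MemTxAttrs attrs = { .secure = 1 };
    MemTxResult r;

    address_space_stl(as, addr, val, attrs, &r);
    return r == MEMTX_OK;
}
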
3422aab33094Sbellard /* XXX: optimize */
342350013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
342450013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3425aab33094Sbellard {
3426aab33094Sbellard     uint8_t v = val;
342750013115SPeter Maydell     MemTxResult r;
342850013115SPeter Maydell 
342950013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
343050013115SPeter Maydell     if (result) {
343150013115SPeter Maydell         *result = r;
343250013115SPeter Maydell     }
343350013115SPeter Maydell }
343450013115SPeter Maydell 
343550013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
343650013115SPeter Maydell {
343750013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3438aab33094Sbellard }
3439aab33094Sbellard 
3440733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
344150013115SPeter Maydell static inline void address_space_stw_internal(AddressSpace *as,
34425ce5944dSEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
344350013115SPeter Maydell                                               MemTxAttrs attrs,
344450013115SPeter Maydell                                               MemTxResult *result,
34451e78bcc1SAlexander Graf                                               enum device_endian endian)
3446aab33094Sbellard {
3447733f0b02SMichael S. Tsirkin     uint8_t *ptr;
34485c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3449149f54b5SPaolo Bonzini     hwaddr l = 2;
3450149f54b5SPaolo Bonzini     hwaddr addr1;
345150013115SPeter Maydell     MemTxResult r;
34524840f10eSJan Kiszka     bool release_lock = false;
3453733f0b02SMichael S. Tsirkin 
345441063e1eSPaolo Bonzini     rcu_read_lock();
34555ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
34565c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
34574840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3458125b3806SPaolo Bonzini 
34591e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
34601e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
34611e78bcc1SAlexander Graf             val = bswap16(val);
34621e78bcc1SAlexander Graf         }
34631e78bcc1SAlexander Graf #else
34641e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
34651e78bcc1SAlexander Graf             val = bswap16(val);
34661e78bcc1SAlexander Graf         }
34671e78bcc1SAlexander Graf #endif
346850013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3469733f0b02SMichael S. Tsirkin     } else {
3470733f0b02SMichael S. Tsirkin         /* RAM case */
34715c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3472733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
34731e78bcc1SAlexander Graf         switch (endian) {
34741e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
34751e78bcc1SAlexander Graf             stw_le_p(ptr, val);
34761e78bcc1SAlexander Graf             break;
34771e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
34781e78bcc1SAlexander Graf             stw_be_p(ptr, val);
34791e78bcc1SAlexander Graf             break;
34801e78bcc1SAlexander Graf         default:
3481733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
34821e78bcc1SAlexander Graf             break;
34831e78bcc1SAlexander Graf         }
3484845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 2);
348550013115SPeter Maydell         r = MEMTX_OK;
3486733f0b02SMichael S. Tsirkin     }
348750013115SPeter Maydell     if (result) {
348850013115SPeter Maydell         *result = r;
348950013115SPeter Maydell     }
34904840f10eSJan Kiszka     if (release_lock) {
34914840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
34924840f10eSJan Kiszka     }
349341063e1eSPaolo Bonzini     rcu_read_unlock();
349450013115SPeter Maydell }
349550013115SPeter Maydell 
349650013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
349750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
349850013115SPeter Maydell {
349950013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
350050013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
350150013115SPeter Maydell }
350250013115SPeter Maydell 
350350013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
350450013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
350550013115SPeter Maydell {
350650013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
350750013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
350850013115SPeter Maydell }
350950013115SPeter Maydell 
351050013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
351150013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
351250013115SPeter Maydell {
351350013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
351450013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3515aab33094Sbellard }
3516aab33094Sbellard 
35175ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35181e78bcc1SAlexander Graf {
351950013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35201e78bcc1SAlexander Graf }
35211e78bcc1SAlexander Graf 
35225ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35231e78bcc1SAlexander Graf {
352450013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35251e78bcc1SAlexander Graf }
35261e78bcc1SAlexander Graf 
35275ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35281e78bcc1SAlexander Graf {
352950013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35301e78bcc1SAlexander Graf }
35311e78bcc1SAlexander Graf 
3532aab33094Sbellard /* XXX: optimize */
353350013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
353450013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
353550013115SPeter Maydell {
353650013115SPeter Maydell     MemTxResult r;
353750013115SPeter Maydell     val = tswap64(val);
353850013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
353950013115SPeter Maydell     if (result) {
354050013115SPeter Maydell         *result = r;
354150013115SPeter Maydell     }
354250013115SPeter Maydell }
354350013115SPeter Maydell 
354450013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
354550013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
354650013115SPeter Maydell {
354750013115SPeter Maydell     MemTxResult r;
354850013115SPeter Maydell     val = cpu_to_le64(val);
354950013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
355050013115SPeter Maydell     if (result) {
355150013115SPeter Maydell         *result = r;
355250013115SPeter Maydell     }
355350013115SPeter Maydell }

355450013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
355550013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
355650013115SPeter Maydell {
355750013115SPeter Maydell     MemTxResult r;
355850013115SPeter Maydell     val = cpu_to_be64(val);
355950013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
356050013115SPeter Maydell     if (result) {
356150013115SPeter Maydell         *result = r;
356250013115SPeter Maydell     }
356350013115SPeter Maydell }
356450013115SPeter Maydell 
3565f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3566aab33094Sbellard {
356750013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3568aab33094Sbellard }
3569aab33094Sbellard 
3570f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35711e78bcc1SAlexander Graf {
357250013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35731e78bcc1SAlexander Graf }
35741e78bcc1SAlexander Graf 
3575f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35761e78bcc1SAlexander Graf {
357750013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35781e78bcc1SAlexander Graf }
35791e78bcc1SAlexander Graf 
35805e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3581f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3582b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
358313eb76e0Sbellard {
358413eb76e0Sbellard     int l;
3585a8170e5eSAvi Kivity     hwaddr phys_addr;
35869b3c35e0Sj_mayer     target_ulong page;
358713eb76e0Sbellard 
358813eb76e0Sbellard     while (len > 0) {
358913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
3590f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
359113eb76e0Sbellard         /* if no physical page is mapped, return an error */
359213eb76e0Sbellard         if (phys_addr == -1)
359313eb76e0Sbellard             return -1;
359413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
359513eb76e0Sbellard         if (l > len)
359613eb76e0Sbellard             l = len;
35975e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
35982e38847bSEdgar E. Iglesias         if (is_write) {
35992e38847bSEdgar E. Iglesias             cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
36002e38847bSEdgar E. Iglesias         } else {
36015c9eb028SPeter Maydell             address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
36025c9eb028SPeter Maydell                              buf, l, 0);
36032e38847bSEdgar E. Iglesias         }
360413eb76e0Sbellard         len -= l;
360513eb76e0Sbellard         buf += l;
360613eb76e0Sbellard         addr += l;
360713eb76e0Sbellard     }
360813eb76e0Sbellard     return 0;
360913eb76e0Sbellard }
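
/*
 * Sketch of a gdbstub-style consumer of cpu_memory_rw_debug(): it takes
 * guest *virtual* addresses and returns -1 if no physical page is
 * mapped; the helper name and parameters are assumptions.
 */
static inline int example_debug_peek(CPUState *cpu, target_ulong vaddr,
                                     uint8_t *out, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0);
}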
3610038629a6SDr. David Alan Gilbert 
3611038629a6SDr. David Alan Gilbert /*
3612038629a6SDr. David Alan Gilbert  * Allows code that needs to deal with migration bitmaps etc. to still be
3613038629a6SDr. David Alan Gilbert  * built target-independent.
3614038629a6SDr. David Alan Gilbert  */
3615038629a6SDr. David Alan Gilbert size_t qemu_target_page_bits(void)
3616038629a6SDr. David Alan Gilbert {
3617038629a6SDr. David Alan Gilbert     return TARGET_PAGE_BITS;
3618038629a6SDr. David Alan Gilbert }
3619038629a6SDr. David Alan Gilbert 
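/*
 * Sketch: target-independent code (e.g. migration) deriving the page
 * size from qemu_target_page_bits() instead of using TARGET_PAGE_SIZE
 * directly.
 */
static inline size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}
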
3620a68fe89cSPaul Brook #endif
362113eb76e0Sbellard 
36228e4a424bSBlue Swirl /*
36238e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find
36248e4a424bSBlue Swirl  * out if it's running on a big-endian machine. Don't do this at home, kids!
36258e4a424bSBlue Swirl  */
362698ed8ecfSGreg Kurz bool target_words_bigendian(void);
362798ed8ecfSGreg Kurz bool target_words_bigendian(void)
36288e4a424bSBlue Swirl {
36298e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
36308e4a424bSBlue Swirl     return true;
36318e4a424bSBlue Swirl #else
36328e4a424bSBlue Swirl     return false;
36338e4a424bSBlue Swirl #endif
36348e4a424bSBlue Swirl }
36358e4a424bSBlue Swirl 
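/*
 * Sketch of the kind of caller the comment above has in mind: legacy
 * virtio is guest-native-endian, so a device model picks the accessor
 * by target byte order.  The helper name is hypothetical.
 */
static inline uint16_t example_virtio_lduw(const void *ptr)
{
    return target_words_bigendian() ? lduw_be_p(ptr) : lduw_le_p(ptr);
}
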
363676f35538SWen Congyang #ifndef CONFIG_USER_ONLY
3637a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
363876f35538SWen Congyang {
36395c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3640149f54b5SPaolo Bonzini     hwaddr l = 1;
364141063e1eSPaolo Bonzini     bool res;
364276f35538SWen Congyang 
364341063e1eSPaolo Bonzini     rcu_read_lock();
36445c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3645149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
364676f35538SWen Congyang 
364741063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
364841063e1eSPaolo Bonzini     rcu_read_unlock();
364941063e1eSPaolo Bonzini     return res;
365076f35538SWen Congyang }
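
/*
 * Sketch: a caller that wants a direct host pointer into guest RAM
 * (e.g. for zero-copy DMA) must first rule out I/O regions with
 * cpu_physical_memory_is_io() and fall back to the MMIO-safe accessors
 * above when it returns true.
 */
static inline bool example_can_map_directly(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}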
3651bd2fa51fSMichael R. Hines 
3652e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3653bd2fa51fSMichael R. Hines {
3654bd2fa51fSMichael R. Hines     RAMBlock *block;
3655e3807054SDr. David Alan Gilbert     int ret = 0;
3656bd2fa51fSMichael R. Hines 
36570dc3f44aSMike Day     rcu_read_lock();
36580dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3659e3807054SDr. David Alan Gilbert         ret = func(block->idstr, block->host, block->offset,
3660e3807054SDr. David Alan Gilbert                    block->used_length, opaque);
3661e3807054SDr. David Alan Gilbert         if (ret) {
3662e3807054SDr. David Alan Gilbert             break;
3663e3807054SDr. David Alan Gilbert         }
3664bd2fa51fSMichael R. Hines     }
36650dc3f44aSMike Day     rcu_read_unlock();
3666e3807054SDr. David Alan Gilbert     return ret;
3667bd2fa51fSMichael R. Hines }
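
/*
 * Sketch of a RAMBlockIterFunc: summing the used length of every RAM
 * block.  Returning 0 continues the walk; qemu_ram_foreach_block()
 * stops and returns the first non-zero value a callback produces.
 */
static int example_sum_block_lengths(const char *block_name, void *host_addr,
                                     ram_addr_t offset, ram_addr_t length,
                                     void *opaque)
{
    *(ram_addr_t *)opaque += length;
    return 0;
}

static inline ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_sum_block_lengths, &total);
    return total;
}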
3668ec3f8c99SPeter Maydell #endif
3669