xref: /qemu/system/physmem.c (revision 79ed0416477440ccb6acf136d6808a4f5848bbdf)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20777872e5SStefan Weil #ifndef _WIN32
21a98d49b1Sbellard #include <sys/types.h>
22d5a8f07cSbellard #include <sys/mman.h>
23d5a8f07cSbellard #endif
2454936004Sbellard 
25055403b2SStefan Weil #include "qemu-common.h"
266180a181Sbellard #include "cpu.h"
27b67d9a52Sbellard #include "tcg.h"
28b3c7724cSpbrook #include "hw/hw.h"
294485bd26SMichael S. Tsirkin #if !defined(CONFIG_USER_ONLY)
3047c8ca53SMarcel Apfelbaum #include "hw/boards.h"
314485bd26SMichael S. Tsirkin #endif
32cc9e98cbSAlex Williamson #include "hw/qdev.h"
331de7afc9SPaolo Bonzini #include "qemu/osdep.h"
349c17d615SPaolo Bonzini #include "sysemu/kvm.h"
352ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
360d09e41aSPaolo Bonzini #include "hw/xen/xen.h"
371de7afc9SPaolo Bonzini #include "qemu/timer.h"
381de7afc9SPaolo Bonzini #include "qemu/config-file.h"
3975a34036SAndreas Färber #include "qemu/error-report.h"
40022c62cbSPaolo Bonzini #include "exec/memory.h"
419c17d615SPaolo Bonzini #include "sysemu/dma.h"
42022c62cbSPaolo Bonzini #include "exec/address-spaces.h"
4353a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4453a5960aSpbrook #include <qemu.h>
45432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
469c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
476506e4f9SStefano Stabellini #include "trace.h"
4853a5960aSpbrook #endif
490d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
500dc3f44aSMike Day #include "qemu/rcu_queue.h"
514840f10eSJan Kiszka #include "qemu/main-loop.h"
525b6dd868SBlue Swirl #include "translate-all.h"
537615936eSPavel Dovgalyuk #include "sysemu/replay.h"
540cac1b66SBlue Swirl 
55022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
56220c3ebdSJuan Quintela #include "exec/ram_addr.h"
5767d95c15SAvi Kivity 
58b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
59794e8f30SMichael S. Tsirkin #ifndef _WIN32
60794e8f30SMichael S. Tsirkin #include "qemu/mmap-alloc.h"
61794e8f30SMichael S. Tsirkin #endif
62b35ba30fSMichael S. Tsirkin 
63db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
641196be37Sths 
6599773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
660dc3f44aSMike Day /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
670dc3f44aSMike Day  * are protected by the ramlist lock.
680dc3f44aSMike Day  */
690d53d9feSMike Day RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
7062152b8aSAvi Kivity 
7162152b8aSAvi Kivity static MemoryRegion *system_memory;
72309cb471SAvi Kivity static MemoryRegion *system_io;
7362152b8aSAvi Kivity 
74f6790af6SAvi Kivity AddressSpace address_space_io;
75f6790af6SAvi Kivity AddressSpace address_space_memory;
762673a5daSAvi Kivity 
770844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
78acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
790e0df1e2SAvi Kivity 
807bd4f430SPaolo Bonzini /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
817bd4f430SPaolo Bonzini #define RAM_PREALLOC   (1 << 0)
827bd4f430SPaolo Bonzini 
83dbcb8981SPaolo Bonzini /* RAM is mmap-ed with MAP_SHARED */
84dbcb8981SPaolo Bonzini #define RAM_SHARED     (1 << 1)
85dbcb8981SPaolo Bonzini 
8662be4e3aSMichael S. Tsirkin /* Only a portion of RAM (used_length) is actually used, and migrated.
8762be4e3aSMichael S. Tsirkin  * This used_length size can change across reboots.
8862be4e3aSMichael S. Tsirkin  */
8962be4e3aSMichael S. Tsirkin #define RAM_RESIZEABLE (1 << 2)
9062be4e3aSMichael S. Tsirkin 
91e2eef170Spbrook #endif
929fa3e853Sbellard 
93bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
946a00d601Sbellard /* current CPU in the current thread. It is only valid inside
956a00d601Sbellard    cpu_exec() */
96f240eb6fSPaolo Bonzini __thread CPUState *current_cpu;
972e70f6efSpbrook /* 0 = Do not count executed instructions.
98bf20dc07Sths    1 = Precise instruction counting.
992e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1005708fc66SPaolo Bonzini int use_icount;
1016a00d601Sbellard 
102e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1034346ae3eSAvi Kivity 
typedef struct PhysPageEntry PhysPageEntry;

/* One slot of the multi-level (radix-tree) physical page map.
 * A node is an array of P_L2_SIZE of these (see typedef Node below).
 */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
1121db8abb1SPaolo Bonzini 
1138b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
1148b795765SMichael S. Tsirkin 
11503f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
11657271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
11703f49957SPaolo Bonzini 
118026736ceSMichael S. Tsirkin #define P_L2_BITS 9
11903f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
12003f49957SPaolo Bonzini 
12103f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
12203f49957SPaolo Bonzini 
12303f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1240475d94fSPaolo Bonzini 
/* Backing storage for one dispatch radix tree: the node pool and the
 * MemoryRegionSection table that leaf entries index into.  Freed as a
 * unit through the RCU head when the dispatch is replaced.
 */
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;        /* sections currently in use */
    unsigned sections_nb_alloc;  /* capacity of the sections array */
    unsigned nodes_nb;           /* tree nodes currently in use */
    unsigned nodes_nb_alloc;     /* capacity of the nodes array */
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
13553cb28cbSMarcel Apfelbaum 
/* Per-AddressSpace lookup structure, rebuilt on memory topology changes
 * and published/reclaimed via RCU.
 */
struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;   /* root entry of the radix tree */
    PhysPageMap map;          /* node pool + section table backing phys_map */
    AddressSpace *as;
};
1461db8abb1SPaolo Bonzini 
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A target page carved into multiple MemoryRegionSections: sub_section
 * maps each byte offset inside the page to a section index.
 */
typedef struct subpage_t {
    MemoryRegion iomem;       /* the region registered for the whole page */
    AddressSpace *as;
    hwaddr base;              /* page-aligned base address of this subpage */
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
15490260c6cSJan Kiszka 
155b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
156b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
157b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
158b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1595312bd8bSAvi Kivity 
160e2eef170Spbrook static void io_mem_init(void);
16162152b8aSAvi Kivity static void memory_map_init(void);
16209daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
163e2eef170Spbrook 
1641ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
16532857f4dSPeter Maydell 
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    /* registered only under TCG; see cpu_address_space_init() */
    MemoryListener tcg_as_listener;
};
17932857f4dSPeter Maydell 
1806658ffb8Spbrook #endif
18154936004Sbellard 
1826d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
183d6f2ea22SAvi Kivity 
18453cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
185f7bf5461SAvi Kivity {
18653cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
18753cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
18853cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
18953cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
190f7bf5461SAvi Kivity     }
191f7bf5461SAvi Kivity }
192f7bf5461SAvi Kivity 
193db94604bSPaolo Bonzini static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
194d6f2ea22SAvi Kivity {
195d6f2ea22SAvi Kivity     unsigned i;
1968b795765SMichael S. Tsirkin     uint32_t ret;
197db94604bSPaolo Bonzini     PhysPageEntry e;
198db94604bSPaolo Bonzini     PhysPageEntry *p;
199d6f2ea22SAvi Kivity 
20053cb28cbSMarcel Apfelbaum     ret = map->nodes_nb++;
201db94604bSPaolo Bonzini     p = map->nodes[ret];
202d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
20353cb28cbSMarcel Apfelbaum     assert(ret != map->nodes_nb_alloc);
204db94604bSPaolo Bonzini 
205db94604bSPaolo Bonzini     e.skip = leaf ? 0 : 1;
206db94604bSPaolo Bonzini     e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
20703f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
208db94604bSPaolo Bonzini         memcpy(&p[i], &e, sizeof(e));
209d6f2ea22SAvi Kivity     }
210f7bf5461SAvi Kivity     return ret;
211d6f2ea22SAvi Kivity }
212d6f2ea22SAvi Kivity 
/* Populate one level of the radix tree so that the page range
 * [*index, *index + *nb) maps to section index @leaf.  Sub-ranges that are
 * not aligned to this level's step size recurse into the next level down.
 * @index and @nb are advanced in place as pages are consumed.
 */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    /* Number of pages covered by one entry at this level. */
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* First touch of this subtree: allocate its node lazily. */
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Aligned and large enough: this entry becomes a leaf covering
             * the whole step, no deeper node needed.
             */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
2385cd2c5b6SRichard Henderson 
/* Map the page range [index, index + nb) of dispatch @d to section index
 * @leaf, reserving node storage up front so the recursive walk never
 * reallocates (which would invalidate pointers into the node pool).
 */
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
24892e873b9Sbellard 
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 *
 * NOTE(review): @compacted is threaded through the recursion but never read
 * or written in this function — confirm whether it is vestigial.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;          /* how many non-NIL children this node has */
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            /* Depth-first: compact the child subtree before deciding
             * whether this level can be collapsed into it.
             */
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    /* Collapse: point directly at the single child's target and absorb
     * its skip count into ours.
     */
    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
301b35ba30fSMichael S. Tsirkin 
/* Compact the whole radix tree of @d, starting at the root.
 * NOTE(review): the @compacted bitmap is declared (as a VLA-sized bitmap)
 * and passed down, but phys_page_compact never touches it — verify intent.
 */
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
310b35ba30fSMichael S. Tsirkin 
/* Walk the radix tree for @addr and return the covering section, or the
 * unassigned section when no mapping covers the address.
 */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* Descend; a compacted entry consumes lp.skip levels in one step. */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* Compression can land on a section that does not actually contain
     * @addr, so verify coverage before returning it.
     */
    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
334f3705d53SAvi Kivity 
335e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
336e5548617SBlue Swirl {
3372a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
338e5548617SBlue Swirl         && mr != &io_mem_watch;
339e5548617SBlue Swirl }
340149f54b5SPaolo Bonzini 
34179e2b9aeSPaolo Bonzini /* Called from RCU critical section */
342c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
34390260c6cSJan Kiszka                                                         hwaddr addr,
34490260c6cSJan Kiszka                                                         bool resolve_subpage)
3459f029603SJan Kiszka {
34690260c6cSJan Kiszka     MemoryRegionSection *section;
34790260c6cSJan Kiszka     subpage_t *subpage;
34890260c6cSJan Kiszka 
34953cb28cbSMarcel Apfelbaum     section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
35090260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
35190260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
35253cb28cbSMarcel Apfelbaum         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
35390260c6cSJan Kiszka     }
35490260c6cSJan Kiszka     return section;
3559f029603SJan Kiszka }
3569f029603SJan Kiszka 
/* Called from RCU critical section */
/* Translate @addr within dispatch @d.  On return, *xlat is the offset into
 * the found section's MemoryRegion; *plen is clamped to the section size
 * for RAM regions only (see the MMIO rationale below).
 */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
39290260c6cSJan Kiszka 
/* Called from RCU critical section */
/* Translate @addr in @as, following any chain of IOMMUs, down to the
 * terminal MemoryRegion.  *xlat receives the offset into that region and
 * *plen is clamped to the contiguous translated range.  If an IOMMU denies
 * the access for @is_write, the unassigned region is returned instead.
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            /* Not an IOMMU region: translation is complete. */
            break;
        }

        /* Rewrite addr through the IOMMU and restart the lookup in the
         * IOMMU's target address space.
         */
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        /* Under Xen, direct accesses must not cross a page boundary. */
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
43190260c6cSJan Kiszka 
43279e2b9aeSPaolo Bonzini /* Called from RCU critical section */
43390260c6cSJan Kiszka MemoryRegionSection *
434d7898cdaSPeter Maydell address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
4359d82b5a7SPaolo Bonzini                                   hwaddr *xlat, hwaddr *plen)
43690260c6cSJan Kiszka {
43730951157SAvi Kivity     MemoryRegionSection *section;
438d7898cdaSPeter Maydell     AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
439d7898cdaSPeter Maydell 
440d7898cdaSPeter Maydell     section = address_space_translate_internal(d, addr, xlat, plen, false);
44130951157SAvi Kivity 
44230951157SAvi Kivity     assert(!section->mr->iommu_ops);
44330951157SAvi Kivity     return section;
44490260c6cSJan Kiszka }
4459fa3e853Sbellard #endif
446fd6ce8f6Sbellard 
447b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4489656f324Spbrook 
449e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
450e7f4eff7SJuan Quintela {
451259186a7SAndreas Färber     CPUState *cpu = opaque;
452e7f4eff7SJuan Quintela 
4533098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
4543098dba0Saurel32        version_id is increased. */
455259186a7SAndreas Färber     cpu->interrupt_request &= ~0x01;
456c01a71c1SChristian Borntraeger     tlb_flush(cpu, 1);
4579656f324Spbrook 
4589656f324Spbrook     return 0;
4599656f324Spbrook }
460e7f4eff7SJuan Quintela 
4616c3bff0eSPavel Dovgaluk static int cpu_common_pre_load(void *opaque)
4626c3bff0eSPavel Dovgaluk {
4636c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4646c3bff0eSPavel Dovgaluk 
465adee6424SPaolo Bonzini     cpu->exception_index = -1;
4666c3bff0eSPavel Dovgaluk 
4676c3bff0eSPavel Dovgaluk     return 0;
4686c3bff0eSPavel Dovgaluk }
4696c3bff0eSPavel Dovgaluk 
4706c3bff0eSPavel Dovgaluk static bool cpu_common_exception_index_needed(void *opaque)
4716c3bff0eSPavel Dovgaluk {
4726c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4736c3bff0eSPavel Dovgaluk 
474adee6424SPaolo Bonzini     return tcg_enabled() && cpu->exception_index != -1;
4756c3bff0eSPavel Dovgaluk }
4766c3bff0eSPavel Dovgaluk 
/* Optional migration subsection carrying a pending exception_index;
 * emitted only when cpu_common_exception_index_needed() says so.
 */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
4876c3bff0eSPavel Dovgaluk 
488bac05aa9SAndrey Smetanin static bool cpu_common_crash_occurred_needed(void *opaque)
489bac05aa9SAndrey Smetanin {
490bac05aa9SAndrey Smetanin     CPUState *cpu = opaque;
491bac05aa9SAndrey Smetanin 
492bac05aa9SAndrey Smetanin     return cpu->crash_occurred;
493bac05aa9SAndrey Smetanin }
494bac05aa9SAndrey Smetanin 
/* Optional migration subsection for the crash_occurred flag; emitted only
 * when cpu_common_crash_occurred_needed() says so.
 */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
505bac05aa9SAndrey Smetanin 
/* Migration description of the architecture-independent CPUState fields,
 * plus the two optional subsections defined above.
 */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
5231a1562f5SAndreas Färber 
5249656f324Spbrook #endif
5259656f324Spbrook 
52638d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
527950f1472SGlauber Costa {
528bdc44640SAndreas Färber     CPUState *cpu;
529950f1472SGlauber Costa 
530bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
53155e5c285SAndreas Färber         if (cpu->cpu_index == index) {
532bdc44640SAndreas Färber             return cpu;
53355e5c285SAndreas Färber         }
534950f1472SGlauber Costa     }
535950f1472SGlauber Costa 
536bdc44640SAndreas Färber     return NULL;
537950f1472SGlauber Costa }
538950f1472SGlauber Costa 
53909daed84SEdgar E. Iglesias #if !defined(CONFIG_USER_ONLY)
/* Attach @as to @cpu as its address space number @asidx, allocating the
 * CPUAddressSpace array on first use.  Target code must have set
 * cpu->num_ases before calling this.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        /* TCG caches the dispatch pointer; track topology changes. */
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
567651a5bc0SPeter Maydell 
/* Accessor for the AddressSpace registered at index @asidx via
 * cpu_address_space_init().
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
57309daed84SEdgar E. Iglesias #endif
57409daed84SEdgar E. Iglesias 
575b7bca733SBharata B Rao #ifndef CONFIG_USER_ONLY
576b7bca733SBharata B Rao static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
577b7bca733SBharata B Rao 
578b7bca733SBharata B Rao static int cpu_get_free_index(Error **errp)
579b7bca733SBharata B Rao {
580b7bca733SBharata B Rao     int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
581b7bca733SBharata B Rao 
582b7bca733SBharata B Rao     if (cpu >= MAX_CPUMASK_BITS) {
583b7bca733SBharata B Rao         error_setg(errp, "Trying to use more CPUs than max of %d",
584b7bca733SBharata B Rao                    MAX_CPUMASK_BITS);
585b7bca733SBharata B Rao         return -1;
586b7bca733SBharata B Rao     }
587b7bca733SBharata B Rao 
588b7bca733SBharata B Rao     bitmap_set(cpu_index_map, cpu, 1);
589b7bca733SBharata B Rao     return cpu;
590b7bca733SBharata B Rao }
591b7bca733SBharata B Rao 
592b7bca733SBharata B Rao void cpu_exec_exit(CPUState *cpu)
593b7bca733SBharata B Rao {
594b7bca733SBharata B Rao     if (cpu->cpu_index == -1) {
595b7bca733SBharata B Rao         /* cpu_index was never allocated by this @cpu or was already freed. */
596b7bca733SBharata B Rao         return;
597b7bca733SBharata B Rao     }
598b7bca733SBharata B Rao 
599b7bca733SBharata B Rao     bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
600b7bca733SBharata B Rao     cpu->cpu_index = -1;
601b7bca733SBharata B Rao }
602b7bca733SBharata B Rao #else
603b7bca733SBharata B Rao 
604b7bca733SBharata B Rao static int cpu_get_free_index(Error **errp)
605b7bca733SBharata B Rao {
606b7bca733SBharata B Rao     CPUState *some_cpu;
607b7bca733SBharata B Rao     int cpu_index = 0;
608b7bca733SBharata B Rao 
609b7bca733SBharata B Rao     CPU_FOREACH(some_cpu) {
610b7bca733SBharata B Rao         cpu_index++;
611b7bca733SBharata B Rao     }
612b7bca733SBharata B Rao     return cpu_index;
613b7bca733SBharata B Rao }
614b7bca733SBharata B Rao 
/* User-mode emulation keeps no cpu_index bitmap (indexes are just the CPU
 * count at creation time), so there is nothing to release here.
 */
void cpu_exec_exit(CPUState *cpu)
{
}
618b7bca733SBharata B Rao #endif
619b7bca733SBharata B Rao 
/* Register @cpu: allocate its cpu_index, insert it on the global CPU list
 * and hook up its vmstate.  On allocation failure @errp is set and the CPU
 * is left unregistered.
 */
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    /* No address spaces are attached yet; see cpu_address_space_init(). */
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

    /* In user mode the CPU list is mutated from guest threads, so it is
     * protected by cpu_list_lock; index allocation and insertion must both
     * happen under it.
     */
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    /* Register the common CPU state for migration unless the device model
     * already provides its own vmstate description.
     */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Legacy savevm path: mutually exclusive with vmsd-based migration,
     * which the asserts below enforce.
     */
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
661fd6ce8f6Sbellard 
66294df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* Invalidate any translated code containing @pc so it is retranslated
 * with the breakpoint check.  User mode: guest addresses map 1:1, so the
 * virtual pc can be used as the physical range directly.
 */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
66794df27fdSPaul Brook #else
/* Invalidate any translated code containing @pc so it is retranslated
 * with the breakpoint check.  System mode: translate pc to a physical
 * address first, picking the address space from the memory attributes.
 */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    /* phys == -1 means the page is not currently mapped: nothing to flush. */
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
678c27004ecSbellard #endif
679d720b93dSbellard 
680c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode emulation does not implement watchpoints: no-op stub. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}
685c527ee8fSPaul Brook 
/* User-mode emulation does not implement watchpoints. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}
6913ee887e8SPeter Maydell 
/* User-mode emulation keeps no watchpoint state: no-op stub. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}
6953ee887e8SPeter Maydell 
/* User-mode emulation does not implement watchpoints. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
701c527ee8fSPaul Brook #else
7026658ffb8Spbrook /* Add a watchpoint.  */
70375a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
704a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
7056658ffb8Spbrook {
706c0ce998eSaliguori     CPUWatchpoint *wp;
7076658ffb8Spbrook 
70805068c0dSPeter Maydell     /* forbid ranges which are empty or run off the end of the address space */
70907e2863dSMax Filippov     if (len == 0 || (addr + len - 1) < addr) {
71075a34036SAndreas Färber         error_report("tried to set invalid watchpoint at %"
71175a34036SAndreas Färber                      VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
712b4051334Saliguori         return -EINVAL;
713b4051334Saliguori     }
7147267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
7156658ffb8Spbrook 
716a1d1bb31Saliguori     wp->vaddr = addr;
71705068c0dSPeter Maydell     wp->len = len;
718a1d1bb31Saliguori     wp->flags = flags;
719a1d1bb31Saliguori 
7202dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
721ff4700b0SAndreas Färber     if (flags & BP_GDB) {
722ff4700b0SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
723ff4700b0SAndreas Färber     } else {
724ff4700b0SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
725ff4700b0SAndreas Färber     }
726a1d1bb31Saliguori 
72731b030d4SAndreas Färber     tlb_flush_page(cpu, addr);
728a1d1bb31Saliguori 
729a1d1bb31Saliguori     if (watchpoint)
730a1d1bb31Saliguori         *watchpoint = wp;
731a1d1bb31Saliguori     return 0;
7326658ffb8Spbrook }
7336658ffb8Spbrook 
734a1d1bb31Saliguori /* Remove a specific watchpoint.  */
73575a34036SAndreas Färber int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
736a1d1bb31Saliguori                           int flags)
7376658ffb8Spbrook {
738a1d1bb31Saliguori     CPUWatchpoint *wp;
7396658ffb8Spbrook 
740ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
74105068c0dSPeter Maydell         if (addr == wp->vaddr && len == wp->len
7426e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
74375a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
7446658ffb8Spbrook             return 0;
7456658ffb8Spbrook         }
7466658ffb8Spbrook     }
747a1d1bb31Saliguori     return -ENOENT;
7486658ffb8Spbrook }
7496658ffb8Spbrook 
750a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
75175a34036SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
752a1d1bb31Saliguori {
753ff4700b0SAndreas Färber     QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7547d03f82fSedgar_igl 
75531b030d4SAndreas Färber     tlb_flush_page(cpu, watchpoint->vaddr);
756a1d1bb31Saliguori 
7577267c094SAnthony Liguori     g_free(watchpoint);
7587d03f82fSedgar_igl }
7597d03f82fSedgar_igl 
760a1d1bb31Saliguori /* Remove all matching watchpoints.  */
76175a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
762a1d1bb31Saliguori {
763c0ce998eSaliguori     CPUWatchpoint *wp, *next;
764a1d1bb31Saliguori 
765ff4700b0SAndreas Färber     QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
76675a34036SAndreas Färber         if (wp->flags & mask) {
76775a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
76875a34036SAndreas Färber         }
769a1d1bb31Saliguori     }
770c0ce998eSaliguori }
77105068c0dSPeter Maydell 
77205068c0dSPeter Maydell /* Return true if this watchpoint address matches the specified
77305068c0dSPeter Maydell  * access (ie the address range covered by the watchpoint overlaps
77405068c0dSPeter Maydell  * partially or completely with the address range covered by the
77505068c0dSPeter Maydell  * access).
77605068c0dSPeter Maydell  */
77705068c0dSPeter Maydell static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
77805068c0dSPeter Maydell                                                   vaddr addr,
77905068c0dSPeter Maydell                                                   vaddr len)
78005068c0dSPeter Maydell {
78105068c0dSPeter Maydell     /* We know the lengths are non-zero, but a little caution is
78205068c0dSPeter Maydell      * required to avoid errors in the case where the range ends
78305068c0dSPeter Maydell      * exactly at the top of the address space and so addr + len
78405068c0dSPeter Maydell      * wraps round to zero.
78505068c0dSPeter Maydell      */
78605068c0dSPeter Maydell     vaddr wpend = wp->vaddr + wp->len - 1;
78705068c0dSPeter Maydell     vaddr addrend = addr + len - 1;
78805068c0dSPeter Maydell 
78905068c0dSPeter Maydell     return !(addr > wpend || wp->vaddr > addrend);
79005068c0dSPeter Maydell }
79105068c0dSPeter Maydell 
792c527ee8fSPaul Brook #endif
793a1d1bb31Saliguori 
794a1d1bb31Saliguori /* Add a breakpoint.  */
795b3310ab3SAndreas Färber int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
796a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
7974c3a88a2Sbellard {
798c0ce998eSaliguori     CPUBreakpoint *bp;
7994c3a88a2Sbellard 
8007267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
8014c3a88a2Sbellard 
802a1d1bb31Saliguori     bp->pc = pc;
803a1d1bb31Saliguori     bp->flags = flags;
804a1d1bb31Saliguori 
8052dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
80600b941e5SAndreas Färber     if (flags & BP_GDB) {
807f0c3c505SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
80800b941e5SAndreas Färber     } else {
809f0c3c505SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
81000b941e5SAndreas Färber     }
811d720b93dSbellard 
812f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, pc);
813a1d1bb31Saliguori 
81400b941e5SAndreas Färber     if (breakpoint) {
815a1d1bb31Saliguori         *breakpoint = bp;
81600b941e5SAndreas Färber     }
8174c3a88a2Sbellard     return 0;
8184c3a88a2Sbellard }
8194c3a88a2Sbellard 
820a1d1bb31Saliguori /* Remove a specific breakpoint.  */
821b3310ab3SAndreas Färber int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
822a1d1bb31Saliguori {
823a1d1bb31Saliguori     CPUBreakpoint *bp;
824a1d1bb31Saliguori 
825f0c3c505SAndreas Färber     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
826a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
827b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
828a1d1bb31Saliguori             return 0;
8297d03f82fSedgar_igl         }
830a1d1bb31Saliguori     }
831a1d1bb31Saliguori     return -ENOENT;
8327d03f82fSedgar_igl }
8337d03f82fSedgar_igl 
834a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
835b3310ab3SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
8364c3a88a2Sbellard {
837f0c3c505SAndreas Färber     QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
838f0c3c505SAndreas Färber 
839f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, breakpoint->pc);
840a1d1bb31Saliguori 
8417267c094SAnthony Liguori     g_free(breakpoint);
842a1d1bb31Saliguori }
843a1d1bb31Saliguori 
844a1d1bb31Saliguori /* Remove all matching breakpoints. */
845b3310ab3SAndreas Färber void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
846a1d1bb31Saliguori {
847c0ce998eSaliguori     CPUBreakpoint *bp, *next;
848a1d1bb31Saliguori 
849f0c3c505SAndreas Färber     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
850b3310ab3SAndreas Färber         if (bp->flags & mask) {
851b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
852b3310ab3SAndreas Färber         }
853c0ce998eSaliguori     }
8544c3a88a2Sbellard }
8554c3a88a2Sbellard 
856c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
857c33a346eSbellard    CPU loop after each instruction */
8583825b28fSAndreas Färber void cpu_single_step(CPUState *cpu, int enabled)
859c33a346eSbellard {
860ed2803daSAndreas Färber     if (cpu->singlestep_enabled != enabled) {
861ed2803daSAndreas Färber         cpu->singlestep_enabled = enabled;
862ed2803daSAndreas Färber         if (kvm_enabled()) {
86338e478ecSStefan Weil             kvm_update_guest_debug(cpu, 0);
864ed2803daSAndreas Färber         } else {
865ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
8669fa3e853Sbellard             /* XXX: only flush what is necessary */
867bbd77c18SPeter Crosthwaite             tb_flush(cpu);
868c33a346eSbellard         }
869e22a25c9Saliguori     }
870c33a346eSbellard }
871c33a346eSbellard 
/* Report a fatal guest error to stderr (and to the debug log, when it is
 * separate), dump the CPU state, and abort().  Never returns.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);   /* the arg list is consumed twice: stderr + log */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /* Reset SIGABRT to its default action so the abort() below
         * terminates the process even if a handler was installed.
         */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
9047501267eSbellard 
9050124311eSbellard #if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
/* Map @addr to the RAMBlock containing it, consulting the MRU cache
 * first.  Aborts if no block covers the address.
 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* Fast path: most lookups hit the most-recently-used block. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
944041603feSPaolo Bonzini 
/* Reset TLB dirty-tracking state on every CPU for the guest RAM range
 * [start, start + length), after the corresponding dirty bitmap bits
 * were cleared.  The range must lie within a single RAM block.
 */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    /* Round the range out to whole target pages. */
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    /* Translate to a host pointer for tlb_reset_dirty(). */
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
964d24981d3SJuan Quintela 
/* Note: start and end must be within the same ram block.  */
/* Atomically clear the @client dirty bits for [start, start + length)
 * and return whether any bit was set.  When running under TCG and dirty
 * pages were found, the TLBs are reset for the range so that subsequent
 * guest writes are trapped again.
 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    /* Convert byte addresses to page numbers in the dirty bitmap. */
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
9881ccde1cbSbellard 
/* Called from RCU critical section */
/* Compute the iotlb value for a TLB entry mapping @vaddr -> @paddr inside
 * @section.  For RAM the value is the ram address (ORed with the NOTDIRTY
 * or ROM special-section index); for anything else it is the section's
 * index in the dispatch map plus the offset within the region.  Pages with
 * a matching watchpoint additionally get TLB_MMIO set in *address so
 * accesses take the slow path.
 */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        /* MMIO: encode the section index rather than a ram address. */
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
10329fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
103333417e70Sbellard 
1034e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
10358da3ff18Spbrook 
1036c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
10375312bd8bSAvi Kivity                              uint16_t section);
1038acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
103954688b1eSAvi Kivity 
/* Hook through which accelerators can override how guest RAM is
 * allocated; defaults to qemu_anon_ram_alloc.
 */
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;
104291138037SMarkus Armbruster 
104391138037SMarkus Armbruster /*
104491138037SMarkus Armbruster  * Set a custom physical guest memory allocator.
104591138037SMarkus Armbruster  * Accelerators with unusual needs may need this.  Hopefully, we can
104691138037SMarkus Armbruster  * get rid of it eventually.
104791138037SMarkus Armbruster  */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    /* Override the default allocator (qemu_anon_ram_alloc). */
    phys_mem_alloc = alloc;
}
105291138037SMarkus Armbruster 
/* Append a copy of @section to @map's section array (taking a reference
 * on the memory region) and return its index.
 */
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    /* Grow geometrically: double the capacity, with a minimum of 16. */
    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    /* Keep the region alive for as long as the map references it. */
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
10715312bd8bSAvi Kivity 
/* Drop the reference taken by phys_section_add() and, for subpage
 * regions, free the containing subpage_t as well.
 */
static void phys_section_destroy(MemoryRegion *mr)
{
    /* Sample the flag before the unref: the unref may release @mr, so it
     * must not be dereferenced afterwards.
     */
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
1084058bc4b5SPaolo Bonzini 
10856092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
10865312bd8bSAvi Kivity {
10879affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
10889affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1089058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1090058bc4b5SPaolo Bonzini     }
10919affd6fcSPaolo Bonzini     g_free(map->sections);
10929affd6fcSPaolo Bonzini     g_free(map->nodes);
10935312bd8bSAvi Kivity }
10945312bd8bSAvi Kivity 
/* Register a section that does not cover a whole target page (or the
 * partial-page head/tail of a larger section) by routing the page
 * through a subpage_t container.
 */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    /* Describes the full page that the subpage container will back. */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must currently be unassigned or already a subpage. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First subsection on this page: create the subpage container
         * and point the dispatch tree at it.
         */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register this section's slice within the page (inclusive bounds). */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
11240f0cb164SAvi Kivity 
11250f0cb164SAvi Kivity 
/* Register a page-aligned section spanning a whole number of target
 * pages directly in the dispatch tree (no subpage indirection needed).
 */
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
113733417e70Sbellard 
/* MemoryListener region_add hook: install @section in the address space's
 * next dispatch map, splitting it at target-page boundaries.  Partial
 * pages go through register_subpage(); runs of whole aligned pages go
 * through register_multipage().
 */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    /* "now" is the fragment handled in the current step; "remain" is the
     * as-yet-unregistered tail of the section.
     */
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Head fragment: the section starts mid-page. */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Loop until "now" covers everything that was left in "remain". */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Tail fragment shorter than a page. */
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            /* Unaligned start: peel off one page as a subpage. */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Aligned run: register all whole pages at once. */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
11700f0cb164SAvi Kivity 
/* Drain KVM's coalesced MMIO buffer so pending writes take effect.
 * No-op when KVM is not in use.
 */
void qemu_flush_coalesced_mmio_buffer(void)
{
    /* Braces added: every other conditional in this file follows the
     * QEMU coding style of always bracing control bodies.
     */
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
117662a2744cSSheng Yang 
/* Acquire the mutex serializing access to ram_list. */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
1181b2a8658eSUmesh Deshpande 
/* Release the mutex serializing access to ram_list. */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1186b2a8658eSUmesh Deshpande 
1187e1e84ba0SMarkus Armbruster #ifdef __linux__
1188c902760fSMarcelo Tosatti 
1189c902760fSMarcelo Tosatti #include <sys/vfs.h>
1190c902760fSMarcelo Tosatti 
1191c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
1192c902760fSMarcelo Tosatti 
1193fc7a5800SHu Tao static long gethugepagesize(const char *path, Error **errp)
1194c902760fSMarcelo Tosatti {
1195c902760fSMarcelo Tosatti     struct statfs fs;
1196c902760fSMarcelo Tosatti     int ret;
1197c902760fSMarcelo Tosatti 
1198c902760fSMarcelo Tosatti     do {
1199c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
1200c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
1201c902760fSMarcelo Tosatti 
1202c902760fSMarcelo Tosatti     if (ret != 0) {
1203fc7a5800SHu Tao         error_setg_errno(errp, errno, "failed to get page size of file %s",
1204fc7a5800SHu Tao                          path);
1205c902760fSMarcelo Tosatti         return 0;
1206c902760fSMarcelo Tosatti     }
1207c902760fSMarcelo Tosatti 
1208c902760fSMarcelo Tosatti     return fs.f_bsize;
1209c902760fSMarcelo Tosatti }
1210c902760fSMarcelo Tosatti 
/*
 * Allocate @memory bytes of backing for @block by mmap()ing a file under
 * @path (typically a hugetlbfs mount).  If @path is a directory, a unique
 * unlinked temporary file is created inside it; otherwise @path itself is
 * opened (and created if needed).  On success the mapped host address is
 * returned and the backing fd is stored in block->fd; on failure NULL is
 * returned with @errp set.
 */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    /* Align the memory region to the huge page size so it can actually be
     * backed by huge pages.
     */
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            /* Keep the fd but drop the directory entry, so the file is
             * cleaned up automatically when QEMU exits.
             */
            unlink(filename);
        }
        g_free(filename);
    } else {
        /* @path is (or will be) a regular file; use it directly. */
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        /* Fault in every page up front so later accesses cannot fail. */
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
1303c902760fSMarcelo Tosatti #endif
1304c902760fSMarcelo Tosatti 
/* Called with the ramlist lock held.
 *
 * Pick a guest ram_addr_t offset for a new block of @size bytes with a
 * best-fit search: among the gaps between existing blocks (and above the
 * last one), choose the smallest gap that still fits @size.  Aborts if
 * no gap is large enough.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        /* Find the closest block starting at or above @end; the free gap
         * after this block is [end, next).
         */
        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
134104b16653SAlex Williamson 
1342652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
134304b16653SAlex Williamson {
1344d17b5288SAlex Williamson     RAMBlock *block;
1345d17b5288SAlex Williamson     ram_addr_t last = 0;
1346d17b5288SAlex Williamson 
13470dc3f44aSMike Day     rcu_read_lock();
13480dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
134962be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
13500d53d9feSMike Day     }
13510dc3f44aSMike Day     rcu_read_unlock();
1352d17b5288SAlex Williamson     return last;
1353d17b5288SAlex Williamson }
1354d17b5288SAlex Williamson 
1355ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1356ddb97f1dSJason Baron {
1357ddb97f1dSJason Baron     int ret;
1358ddb97f1dSJason Baron 
1359ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
136047c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1361ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1362ddb97f1dSJason Baron         if (ret) {
1363ddb97f1dSJason Baron             perror("qemu_madvise");
1364ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1365ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1366ddb97f1dSJason Baron         }
1367ddb97f1dSJason Baron     }
1368ddb97f1dSJason Baron }
1369ddb97f1dSJason Baron 
13700dc3f44aSMike Day /* Called within an RCU critical section, or while the ramlist lock
13710dc3f44aSMike Day  * is held.
13720dc3f44aSMike Day  */
137320cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
137484b89d78SCam Macdonell {
137520cfe881SHu Tao     RAMBlock *block;
137684b89d78SCam Macdonell 
13770dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1378c5705a77SAvi Kivity         if (block->offset == addr) {
137920cfe881SHu Tao             return block;
1380c5705a77SAvi Kivity         }
1381c5705a77SAvi Kivity     }
138220cfe881SHu Tao 
138320cfe881SHu Tao     return NULL;
138420cfe881SHu Tao }
138520cfe881SHu Tao 
/* Return the identifier string of @rb (set by qemu_ram_set_idstr()). */
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}
1390422148d3SDr. David Alan Gilbert 
1391ae3a7047SMike Day /* Called with iothread lock held.  */
/* Install the identifier string "<dev-path>/<name>" (or just "<name>" when
 * @dev is NULL or has no qdev path) on the block at @addr.  The idstr must
 * not already be set; aborts if it would collide with another block's.
 */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        /* Prefix with the device path so same-named blocks on different
         * devices stay distinguishable.
         */
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* idstrs must be unique across all blocks (used e.g. as lookup keys). */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
1419c5705a77SAvi Kivity 
1420ae3a7047SMike Day /* Called with iothread lock held.  */
/* Clear the identifier string of the block at @addr, if such a block
 * exists; a missing block is silently ignored.
 */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}
143720cfe881SHu Tao 
14388490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
14398490fc78SLuiz Capitulino {
144075cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
14418490fc78SLuiz Capitulino         /* disabled by the user */
14428490fc78SLuiz Capitulino         return 0;
14438490fc78SLuiz Capitulino     }
14448490fc78SLuiz Capitulino 
14458490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
14468490fc78SLuiz Capitulino }
14478490fc78SLuiz Capitulino 
144862be4e3aSMichael S. Tsirkin /* Only legal before guest might have detected the memory size: e.g. on
144962be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
145062be4e3aSMichael S. Tsirkin  *
145162be4e3aSMichael S. Tsirkin  * As memory core doesn't know how is memory accessed, it is up to
145262be4e3aSMichael S. Tsirkin  * resize callback to update device state and/or add assertions to detect
145362be4e3aSMichael S. Tsirkin  * misuse, if necessary.
145462be4e3aSMichael S. Tsirkin  */
/* Resize the used portion of the block at @base to @newsize (rounded up to
 * a host page).  Only RAM_RESIZEABLE blocks may change size, and never
 * beyond their preallocated max_length.  Returns 0 on success, -EINVAL
 * (with @errp set) otherwise.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    /* Sizes are tracked at host-page granularity. */
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    /* Growing past the preallocated maximum is not possible. */
    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Reset dirty tracking over the old range, then mark the whole new
     * used range dirty for all clients.
     */
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        /* Let the owner update device state for the new size. */
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
149362be4e3aSMichael S. Tsirkin 
/*
 * Register @new_block: pick its guest offset, allocate host memory if the
 * caller did not provide any (via Xen or the configured phys_mem_alloc),
 * insert it into the RCU-protected block list, and extend the dirty
 * bitmaps.  Returns the block's guest offset, or -1 with @errp set on
 * allocation failure (the caller frees @new_block in that case).
 */
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
       }
    }
    /* Newly added RAM starts out dirty for all clients. */
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* Keep guest RAM out of forked children's address spaces. */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
1579e9a1ab19Sbellard 
15800b183fc8SPaolo Bonzini #ifdef __linux__
/*
 * Allocate a file-backed RAM block of @size bytes from @mem_path (e.g. a
 * hugetlbfs mount) and register it under @mr.  Only supported when the
 * accelerator uses the default anonymous allocator and Xen is disabled.
 * Returns the block's guest offset, or -1 with @errp set.
 */
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
16260b183fc8SPaolo Bonzini #endif
1627e1c57ab8SPaolo Bonzini 
/*
 * Common allocation path for the qemu_ram_alloc* family: build a RAMBlock
 * of @size bytes (growable up to @max_size when @resizeable), optionally
 * backed by caller-provided @host memory, and register it via
 * ram_block_add().  @resized, if non-NULL, is called back after a
 * successful qemu_ram_resize().  Returns the guest offset, or -1 with
 * @errp set.
 */
static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        /* Caller owns the memory; reclaim_ramblock() must not free it. */
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
1664e1c57ab8SPaolo Bonzini 
/* Register a fixed-size RAM block backed by caller-provided @host memory. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}
167062be4e3aSMichael S. Tsirkin 
/* Allocate and register a fixed-size RAM block for @mr. */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}
167562be4e3aSMichael S. Tsirkin 
/* Allocate a resizeable RAM block: @size bytes used initially, growable
 * up to @maxsz via qemu_ram_resize(); @resized is notified on resize.
 */
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
16846977dfe6SYoshiaki Tamura 
/* Free a RAMBlock's backing storage and the block struct itself, using
 * whichever release path matches how it was allocated.  Runs after an RCU
 * grace period (scheduled via call_rcu() in qemu_ram_free()).
 */
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        /* Memory was provided by the caller; nothing to free here. */
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        /* File-backed (e.g. hugetlbfs): unmap and close the backing fd. */
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
170143771539SPaolo Bonzini 
/* Unlink the block at @addr from the RAM list.  The storage is reclaimed
 * via reclaim_ramblock() only after an RCU grace period, so concurrent
 * RCU readers can still safely traverse the old list.
 */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
1720e9a1ab19Sbellard 
1721cd19cfa2SHuang Ying #ifndef _WIN32
/* Re-map the guest RAM range [addr, addr+length) in place (MAP_FIXED) at
 * its existing host virtual address, from the same backing — file-backed
 * blocks from their fd, anonymous blocks from fresh anonymous memory.
 * Exits QEMU if the remap fails, since the guest RAM would be lost.
 */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                /* Caller-owned memory: not ours to remap. */
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Re-apply per-block madvise settings lost by the remap. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
1768cd19cfa2SHuang Ying #endif /* !_WIN32 */
1769cd19cfa2SHuang Ying 
1770a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1771a35ba7beSPaolo Bonzini {
1772ae3a7047SMike Day     RAMBlock *block;
1773ae3a7047SMike Day     int fd;
1774a35ba7beSPaolo Bonzini 
17750dc3f44aSMike Day     rcu_read_lock();
1776ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1777ae3a7047SMike Day     fd = block->fd;
17780dc3f44aSMike Day     rcu_read_unlock();
1779ae3a7047SMike Day     return fd;
1780a35ba7beSPaolo Bonzini }
1781a35ba7beSPaolo Bonzini 
178256a571d9STetsuya Mukawa void qemu_set_ram_fd(ram_addr_t addr, int fd)
178356a571d9STetsuya Mukawa {
178456a571d9STetsuya Mukawa     RAMBlock *block;
178556a571d9STetsuya Mukawa 
178656a571d9STetsuya Mukawa     rcu_read_lock();
178756a571d9STetsuya Mukawa     block = qemu_get_ram_block(addr);
178856a571d9STetsuya Mukawa     block->fd = fd;
178956a571d9STetsuya Mukawa     rcu_read_unlock();
179056a571d9STetsuya Mukawa }
179156a571d9STetsuya Mukawa 
17923fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
17933fd74b84SDamjan Marion {
1794ae3a7047SMike Day     RAMBlock *block;
1795ae3a7047SMike Day     void *ptr;
17963fd74b84SDamjan Marion 
17970dc3f44aSMike Day     rcu_read_lock();
1798ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1799ae3a7047SMike Day     ptr = ramblock_ptr(block, 0);
18000dc3f44aSMike Day     rcu_read_unlock();
1801ae3a7047SMike Day     return ptr;
18023fd74b84SDamjan Marion }
18033fd74b84SDamjan Marion 
18041b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
1805ae3a7047SMike Day  * This should not be used for general purpose DMA.  Use address_space_map
1806ae3a7047SMike Day  * or address_space_rw instead. For local memory (e.g. video ram) that the
1807ae3a7047SMike Day  * device owns, use memory_region_get_ram_ptr.
18080dc3f44aSMike Day  *
180949b24afcSPaolo Bonzini  * Called within RCU critical section.
18101b5ec234SPaolo Bonzini  */
18111b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
18121b5ec234SPaolo Bonzini {
181349b24afcSPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
1814ae3a7047SMike Day 
1815ae3a7047SMike Day     if (xen_enabled() && block->host == NULL) {
1816432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1817432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1818712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1819432d268cSJun Nakajima          */
1820432d268cSJun Nakajima         if (block->offset == 0) {
182149b24afcSPaolo Bonzini             return xen_map_cache(addr, 0, 0);
1822432d268cSJun Nakajima         }
1823ae3a7047SMike Day 
1824ae3a7047SMike Day         block->host = xen_map_cache(block->offset, block->max_length, 1);
1825432d268cSJun Nakajima     }
182649b24afcSPaolo Bonzini     return ramblock_ptr(block, addr - block->offset);
182794a6b54fSpbrook }
1828f471a17eSAlex Williamson 
182938bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1830ae3a7047SMike Day  * but takes a size argument.
18310dc3f44aSMike Day  *
1832e81bcda5SPaolo Bonzini  * Called within RCU critical section.
1833ae3a7047SMike Day  */
1834cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
183538bee5dcSStefano Stabellini {
1836e81bcda5SPaolo Bonzini     RAMBlock *block;
1837e81bcda5SPaolo Bonzini     ram_addr_t offset_inside_block;
18388ab934f9SStefano Stabellini     if (*size == 0) {
18398ab934f9SStefano Stabellini         return NULL;
18408ab934f9SStefano Stabellini     }
1841e81bcda5SPaolo Bonzini 
1842e81bcda5SPaolo Bonzini     block = qemu_get_ram_block(addr);
1843e81bcda5SPaolo Bonzini     offset_inside_block = addr - block->offset;
1844e81bcda5SPaolo Bonzini     *size = MIN(*size, block->max_length - offset_inside_block);
1845e81bcda5SPaolo Bonzini 
1846e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
1847e81bcda5SPaolo Bonzini         /* We need to check if the requested address is in the RAM
1848e81bcda5SPaolo Bonzini          * because we don't want to map the entire memory in QEMU.
1849e81bcda5SPaolo Bonzini          * In that case just map the requested area.
1850e81bcda5SPaolo Bonzini          */
1851e81bcda5SPaolo Bonzini         if (block->offset == 0) {
1852e41d7c69SJan Kiszka             return xen_map_cache(addr, *size, 1);
185338bee5dcSStefano Stabellini         }
185438bee5dcSStefano Stabellini 
1855e81bcda5SPaolo Bonzini         block->host = xen_map_cache(block->offset, block->max_length, 1);
185638bee5dcSStefano Stabellini     }
1857e81bcda5SPaolo Bonzini 
1858e81bcda5SPaolo Bonzini     return ramblock_ptr(block, offset_inside_block);
185938bee5dcSStefano Stabellini }
186038bee5dcSStefano Stabellini 
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        /* Under Xen the host pointer comes from the map cache, which has
         * its own reverse lookup; the generic list walk below would not
         * find it.
         */
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    /* Fast path: check the most-recently-used block first. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped into the host
         * (block->host == NULL), e.g. under Xen; skip it.
         */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
1924422148d3SDr. David Alan Gilbert 
1925e3dd7493SDr. David Alan Gilbert /*
1926e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
1927e3dd7493SDr. David Alan Gilbert  *
1928e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
1929e3dd7493SDr. David Alan Gilbert  *
1930e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1931e3dd7493SDr. David Alan Gilbert  */
1932e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
1933e3dd7493SDr. David Alan Gilbert {
1934e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
1935e3dd7493SDr. David Alan Gilbert 
1936e3dd7493SDr. David Alan Gilbert     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1937e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
1938e3dd7493SDr. David Alan Gilbert             return block;
1939e3dd7493SDr. David Alan Gilbert         }
1940e3dd7493SDr. David Alan Gilbert     }
1941e3dd7493SDr. David Alan Gilbert 
1942e3dd7493SDr. David Alan Gilbert     return NULL;
1943e3dd7493SDr. David Alan Gilbert }
1944e3dd7493SDr. David Alan Gilbert 
1945422148d3SDr. David Alan Gilbert /* Some of the softmmu routines need to translate from a host pointer
1946422148d3SDr. David Alan Gilbert    (typically a TLB entry) back to a ram offset.  */
1947422148d3SDr. David Alan Gilbert MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1948422148d3SDr. David Alan Gilbert {
1949422148d3SDr. David Alan Gilbert     RAMBlock *block;
1950422148d3SDr. David Alan Gilbert     ram_addr_t offset; /* Not used */
1951422148d3SDr. David Alan Gilbert 
1952422148d3SDr. David Alan Gilbert     block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1953422148d3SDr. David Alan Gilbert 
1954422148d3SDr. David Alan Gilbert     if (!block) {
1955422148d3SDr. David Alan Gilbert         return NULL;
1956422148d3SDr. David Alan Gilbert     }
1957422148d3SDr. David Alan Gilbert 
1958422148d3SDr. David Alan Gilbert     return block->mr;
1959e890261fSMarcelo Tosatti }
1960f471a17eSAlex Williamson 
/* Write handler for pages whose dirty tracking is still active.
 * Invalidates any TBs generated from the target page, performs the
 * store, then marks the page dirty so that subsequent writes can go
 * straight to RAM.
 *
 * Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    /* If translated code was generated from this page, throw it away
     * before modifying the memory it was built from. */
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
19921ccde1cbSbellard 
/* Only writes are routed through the notdirty region; reads are never
 * dispatched here, so reject them. */
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}
1998b018ddf6SPaolo Bonzini 
/* MemoryRegionOps for io_mem_notdirty: write-only region used to trap
 * stores to pages with active dirty tracking (see notdirty_mem_write). */
static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
20041ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
 *
 * offset: offset of the access within the current I/O page
 * len:    access length in bytes
 * attrs:  memory transaction attributes of the access
 * flags:  BP_MEM_READ or BP_MEM_WRITE, matched against each
 *         watchpoint's flags
 *
 * Implicitly operates on current_cpu.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access. */
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access executes: exit to the
                     * debugger immediately. */
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Stop after the access: regenerate a single-insn TB
                     * and restart execution so the insn completes first. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            /* Non-matching watchpoints have their hit status cleared. */
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
20500f459d16Spbrook 
20516658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
20526658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
20536658ffb8Spbrook    phys routines.  */
205466b9b43cSPeter Maydell static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
205566b9b43cSPeter Maydell                                   unsigned size, MemTxAttrs attrs)
20566658ffb8Spbrook {
205766b9b43cSPeter Maydell     MemTxResult res;
205866b9b43cSPeter Maydell     uint64_t data;
205979ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
206079ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
20616658ffb8Spbrook 
206266b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
20631ec9b909SAvi Kivity     switch (size) {
206467364150SMax Filippov     case 1:
206579ed0416SPeter Maydell         data = address_space_ldub(as, addr, attrs, &res);
206667364150SMax Filippov         break;
206767364150SMax Filippov     case 2:
206879ed0416SPeter Maydell         data = address_space_lduw(as, addr, attrs, &res);
206967364150SMax Filippov         break;
207067364150SMax Filippov     case 4:
207179ed0416SPeter Maydell         data = address_space_ldl(as, addr, attrs, &res);
207267364150SMax Filippov         break;
20731ec9b909SAvi Kivity     default: abort();
20741ec9b909SAvi Kivity     }
207566b9b43cSPeter Maydell     *pdata = data;
207666b9b43cSPeter Maydell     return res;
207766b9b43cSPeter Maydell }
207866b9b43cSPeter Maydell 
207966b9b43cSPeter Maydell static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
208066b9b43cSPeter Maydell                                    uint64_t val, unsigned size,
208166b9b43cSPeter Maydell                                    MemTxAttrs attrs)
208266b9b43cSPeter Maydell {
208366b9b43cSPeter Maydell     MemTxResult res;
208479ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
208579ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
208666b9b43cSPeter Maydell 
208766b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
208866b9b43cSPeter Maydell     switch (size) {
208966b9b43cSPeter Maydell     case 1:
209079ed0416SPeter Maydell         address_space_stb(as, addr, val, attrs, &res);
209166b9b43cSPeter Maydell         break;
209266b9b43cSPeter Maydell     case 2:
209379ed0416SPeter Maydell         address_space_stw(as, addr, val, attrs, &res);
209466b9b43cSPeter Maydell         break;
209566b9b43cSPeter Maydell     case 4:
209679ed0416SPeter Maydell         address_space_stl(as, addr, val, attrs, &res);
209766b9b43cSPeter Maydell         break;
209866b9b43cSPeter Maydell     default: abort();
209966b9b43cSPeter Maydell     }
210066b9b43cSPeter Maydell     return res;
21016658ffb8Spbrook }
21026658ffb8Spbrook 
/* MemoryRegionOps for io_mem_watch: routes accesses to pages containing
 * watchpoints through the check-then-forward handlers above. */
static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
21086658ffb8Spbrook 
2109f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2110f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2111db7b5426Sblueswir1 {
2112acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2113ff6cff75SPaolo Bonzini     uint8_t buf[8];
21145c9eb028SPeter Maydell     MemTxResult res;
2115791af8c8SPaolo Bonzini 
2116db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2117016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2118acc9d80bSJan Kiszka            subpage, len, addr);
2119db7b5426Sblueswir1 #endif
21205c9eb028SPeter Maydell     res = address_space_read(subpage->as, addr + subpage->base,
21215c9eb028SPeter Maydell                              attrs, buf, len);
21225c9eb028SPeter Maydell     if (res) {
21235c9eb028SPeter Maydell         return res;
2124f25a49e0SPeter Maydell     }
2125acc9d80bSJan Kiszka     switch (len) {
2126acc9d80bSJan Kiszka     case 1:
2127f25a49e0SPeter Maydell         *data = ldub_p(buf);
2128f25a49e0SPeter Maydell         return MEMTX_OK;
2129acc9d80bSJan Kiszka     case 2:
2130f25a49e0SPeter Maydell         *data = lduw_p(buf);
2131f25a49e0SPeter Maydell         return MEMTX_OK;
2132acc9d80bSJan Kiszka     case 4:
2133f25a49e0SPeter Maydell         *data = ldl_p(buf);
2134f25a49e0SPeter Maydell         return MEMTX_OK;
2135ff6cff75SPaolo Bonzini     case 8:
2136f25a49e0SPeter Maydell         *data = ldq_p(buf);
2137f25a49e0SPeter Maydell         return MEMTX_OK;
2138acc9d80bSJan Kiszka     default:
2139acc9d80bSJan Kiszka         abort();
2140acc9d80bSJan Kiszka     }
2141db7b5426Sblueswir1 }
2142db7b5426Sblueswir1 
2143f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2144f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2145db7b5426Sblueswir1 {
2146acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2147ff6cff75SPaolo Bonzini     uint8_t buf[8];
2148acc9d80bSJan Kiszka 
2149db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2150016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2151acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2152acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2153db7b5426Sblueswir1 #endif
2154acc9d80bSJan Kiszka     switch (len) {
2155acc9d80bSJan Kiszka     case 1:
2156acc9d80bSJan Kiszka         stb_p(buf, value);
2157acc9d80bSJan Kiszka         break;
2158acc9d80bSJan Kiszka     case 2:
2159acc9d80bSJan Kiszka         stw_p(buf, value);
2160acc9d80bSJan Kiszka         break;
2161acc9d80bSJan Kiszka     case 4:
2162acc9d80bSJan Kiszka         stl_p(buf, value);
2163acc9d80bSJan Kiszka         break;
2164ff6cff75SPaolo Bonzini     case 8:
2165ff6cff75SPaolo Bonzini         stq_p(buf, value);
2166ff6cff75SPaolo Bonzini         break;
2167acc9d80bSJan Kiszka     default:
2168acc9d80bSJan Kiszka         abort();
2169acc9d80bSJan Kiszka     }
21705c9eb028SPeter Maydell     return address_space_write(subpage->as, addr + subpage->base,
21715c9eb028SPeter Maydell                                attrs, buf, len);
2172db7b5426Sblueswir1 }
2173db7b5426Sblueswir1 
2174c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
2175016e9d62SAmos Kong                             unsigned len, bool is_write)
2176c353e4ccSPaolo Bonzini {
2177acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2178c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2179016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2180acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2181c353e4ccSPaolo Bonzini #endif
2182c353e4ccSPaolo Bonzini 
2183acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
2184016e9d62SAmos Kong                                       len, is_write);
2185c353e4ccSPaolo Bonzini }
2186c353e4ccSPaolo Bonzini 
/* MemoryRegionOps for subpage regions: accept 1-8 byte accesses and
 * tunnel them back into the owning address space (see subpage_read /
 * subpage_write / subpage_accepts above). */
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2197db7b5426Sblueswir1 
2198c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
21995312bd8bSAvi Kivity                              uint16_t section)
2200db7b5426Sblueswir1 {
2201db7b5426Sblueswir1     int idx, eidx;
2202db7b5426Sblueswir1 
2203db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2204db7b5426Sblueswir1         return -1;
2205db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2206db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2207db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2208016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2209016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2210db7b5426Sblueswir1 #endif
2211db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
22125312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2213db7b5426Sblueswir1     }
2214db7b5426Sblueswir1 
2215db7b5426Sblueswir1     return 0;
2216db7b5426Sblueswir1 }
2217db7b5426Sblueswir1 
2218acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2219db7b5426Sblueswir1 {
2220c227f099SAnthony Liguori     subpage_t *mmio;
2221db7b5426Sblueswir1 
22227267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
22231eec614bSaliguori 
2224acc9d80bSJan Kiszka     mmio->as = as;
2225db7b5426Sblueswir1     mmio->base = base;
22262c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2227b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2228b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2229db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2230016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2231016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2232db7b5426Sblueswir1 #endif
2233b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2234db7b5426Sblueswir1 
2235db7b5426Sblueswir1     return mmio;
2236db7b5426Sblueswir1 }
2237db7b5426Sblueswir1 
2238a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2239a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
22405312bd8bSAvi Kivity {
2241a656e22fSPeter Crosthwaite     assert(as);
22425312bd8bSAvi Kivity     MemoryRegionSection section = {
2243a656e22fSPeter Crosthwaite         .address_space = as,
22445312bd8bSAvi Kivity         .mr = mr,
22455312bd8bSAvi Kivity         .offset_within_address_space = 0,
22465312bd8bSAvi Kivity         .offset_within_region = 0,
2247052e87b0SPaolo Bonzini         .size = int128_2_64(),
22485312bd8bSAvi Kivity     };
22495312bd8bSAvi Kivity 
225053cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
22515312bd8bSAvi Kivity }
22525312bd8bSAvi Kivity 
2253a54c87b6SPeter Maydell MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2254aa102231SAvi Kivity {
2255a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2256a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
225732857f4dSPeter Maydell     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
225879e2b9aeSPaolo Bonzini     MemoryRegionSection *sections = d->map.sections;
22599d82b5a7SPaolo Bonzini 
22609d82b5a7SPaolo Bonzini     return sections[index & ~TARGET_PAGE_MASK].mr;
2261aa102231SAvi Kivity }
2262aa102231SAvi Kivity 
/* One-time initialization of the special global I/O memory regions
 * (ROM, unassigned, notdirty, watchpoint), each covering the full
 * 64-bit range. */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
2273e9179ce1SAvi Kivity 
/* MemoryListener "begin" hook: build a fresh, empty dispatch table for
 * the address space.  It is published by mem_commit() once the topology
 * update completes. */
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    /* The registration order below must match the PHYS_SECTION_*
     * constants; the asserts pin that invariant. */
    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    /* Empty page table: everything resolves to unassigned for now. */
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
229300752703SPaolo Bonzini 
/* Release a dispatch table and its section map.  Invoked directly or as
 * a call_rcu() callback after readers have drained. */
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
229979e2b9aeSPaolo Bonzini 
/* MemoryListener "commit" hook: atomically publish the dispatch table
 * built by mem_begin() and schedule the old one for reclamation once
 * current RCU readers are done. */
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    /* Compact the radix tree before it becomes visible to readers. */
    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        /* Defer freeing until all RCU readers of "cur" have finished. */
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
23139affd6fcSPaolo Bonzini 
/* Per-CPU TCG listener "commit" hook: refresh the CPU's cached dispatch
 * pointer and flush its TLB after a memory topology change. */
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
233150c1e149SAvi Kivity 
/* Hook an address space into the dispatch machinery: install the
 * begin/commit/region listener that (re)builds its dispatch table on
 * every memory topology change. */
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        /* "nop" regions are re-added unchanged; same handler applies. */
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
2344ac1970fbSAvi Kivity 
23456e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
23466e48e8f9SPaolo Bonzini {
23476e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
23486e48e8f9SPaolo Bonzini }
23496e48e8f9SPaolo Bonzini 
235083f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
235183f3c251SAvi Kivity {
235283f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
235383f3c251SAvi Kivity 
235479e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
235579e2b9aeSPaolo Bonzini     if (d) {
235679e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
235779e2b9aeSPaolo Bonzini     }
235883f3c251SAvi Kivity }
235983f3c251SAvi Kivity 
236062152b8aSAvi Kivity static void memory_map_init(void)
236162152b8aSAvi Kivity {
23627267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
236303f49957SPaolo Bonzini 
236457271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
23657dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2366309cb471SAvi Kivity 
23677267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
23683bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
23693bb28b72SJan Kiszka                           65536);
23707dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
23712641689aSliguang }
237262152b8aSAvi Kivity 
237362152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
237462152b8aSAvi Kivity {
237562152b8aSAvi Kivity     return system_memory;
237662152b8aSAvi Kivity }
237762152b8aSAvi Kivity 
2378309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2379309cb471SAvi Kivity {
2380309cb471SAvi Kivity     return system_io;
2381309cb471SAvi Kivity }
2382309cb471SAvi Kivity 
2383e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2384e2eef170Spbrook 
238513eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
238613eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Debug accessor for guest memory under user-mode emulation.
 * Copies @len bytes between @buf and guest address @addr, one page at a
 * time, honouring the page protection flags.  Returns 0 on success, -1
 * if any page is unmapped or lacks the required read/write permission.
 */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the remainder of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
24258df1cd07Sbellard 
242613eb76e0Sbellard #else
242751d7a9ebSAnthony PERARD 
2428845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2429a8170e5eSAvi Kivity                                      hwaddr length)
243051d7a9ebSAnthony PERARD {
2431845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2432e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2433e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2434e87f7778SPaolo Bonzini      * xen_modified_memory.
2435e87f7778SPaolo Bonzini      */
2436e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2437e87f7778SPaolo Bonzini         dirty_log_mask =
2438e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2439e87f7778SPaolo Bonzini     }
2440845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
244135865339SPaolo Bonzini         tb_invalidate_phys_range(addr, addr + length);
2442845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2443845b6214SPaolo Bonzini     }
244458d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
244549dfcec4SPaolo Bonzini }
244651d7a9ebSAnthony PERARD 
244723326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
244882f2563fSPaolo Bonzini {
2449e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
245023326164SRichard Henderson 
245123326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
245223326164SRichard Henderson        otherwise specified.  */
245323326164SRichard Henderson     if (access_size_max == 0) {
245423326164SRichard Henderson         access_size_max = 4;
245582f2563fSPaolo Bonzini     }
245623326164SRichard Henderson 
245723326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
245823326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
245923326164SRichard Henderson         unsigned align_size_max = addr & -addr;
246023326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
246123326164SRichard Henderson             access_size_max = align_size_max;
246223326164SRichard Henderson         }
246323326164SRichard Henderson     }
246423326164SRichard Henderson 
246523326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
246623326164SRichard Henderson     if (l > access_size_max) {
246723326164SRichard Henderson         l = access_size_max;
246823326164SRichard Henderson     }
24696554f5c0SPeter Maydell     l = pow2floor(l);
247023326164SRichard Henderson 
247123326164SRichard Henderson     return l;
247282f2563fSPaolo Bonzini }
247382f2563fSPaolo Bonzini 
/* Take the iothread lock, if needed, before dispatching to MMIO.
 *
 * A region with ->global_locking set must be accessed under the iothread
 * lock, so acquire it when the caller does not already hold it.  A region
 * with pending coalesced MMIO needs the buffer flushed (under the lock)
 * before the access.  Returns true iff the caller must release the
 * iothread lock again once the access completes.
 */
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        /* Flush under the lock, but drop it again if it was taken only
         * for the flush. */
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
2496125b3806SPaolo Bonzini 
/* Called within RCU critical section.  */
/* Continue a write whose first fragment is already translated: @addr1,
 * @l and @mr describe that fragment.  MMIO fragments are dispatched in
 * the largest unit memory_access_size() allows; RAM fragments are
 * memcpy'd and marked dirty.  MemTx error flags from every fragment are
 * OR-ed into the returned result.
 */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                /* memory_access_size() only returns powers of two <= 8 */
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        /* Drop the iothread lock between fragments so we do not hold it
         * across the next translation. */
        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
2570eb7eeb88SPaolo Bonzini 
2571a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2572a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2573eb7eeb88SPaolo Bonzini {
2574eb7eeb88SPaolo Bonzini     hwaddr l;
2575eb7eeb88SPaolo Bonzini     hwaddr addr1;
2576eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2577eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2578a203ac70SPaolo Bonzini 
2579a203ac70SPaolo Bonzini     if (len > 0) {
2580a203ac70SPaolo Bonzini         rcu_read_lock();
2581a203ac70SPaolo Bonzini         l = len;
2582a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2583a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2584a203ac70SPaolo Bonzini                                               addr1, l, mr);
2585a203ac70SPaolo Bonzini         rcu_read_unlock();
2586a203ac70SPaolo Bonzini     }
2587a203ac70SPaolo Bonzini 
2588a203ac70SPaolo Bonzini     return result;
2589a203ac70SPaolo Bonzini }
2590a203ac70SPaolo Bonzini 
/* Called within RCU critical section.  */
/* Continue a read whose first fragment is already translated: @addr1,
 * @l and @mr describe that fragment.  MMIO fragments are dispatched in
 * the largest unit memory_access_size() allows; RAM fragments are
 * memcpy'd.  MemTx error flags from every fragment are OR-ed into the
 * returned result.
 */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                /* memory_access_size() only returns powers of two <= 8 */
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        /* Drop the iothread lock between fragments so we do not hold it
         * across the next translation. */
        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
2660a203ac70SPaolo Bonzini 
26613cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
26623cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2663a203ac70SPaolo Bonzini {
2664a203ac70SPaolo Bonzini     hwaddr l;
2665a203ac70SPaolo Bonzini     hwaddr addr1;
2666a203ac70SPaolo Bonzini     MemoryRegion *mr;
2667a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2668a203ac70SPaolo Bonzini 
2669a203ac70SPaolo Bonzini     if (len > 0) {
2670a203ac70SPaolo Bonzini         rcu_read_lock();
2671a203ac70SPaolo Bonzini         l = len;
2672a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2673a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2674a203ac70SPaolo Bonzini                                              addr1, l, mr);
267541063e1eSPaolo Bonzini         rcu_read_unlock();
2676a203ac70SPaolo Bonzini     }
2677fd8aaa76SPaolo Bonzini 
26783b643495SPeter Maydell     return result;
267913eb76e0Sbellard }
26808df1cd07Sbellard 
2681eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2682eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2683ac1970fbSAvi Kivity {
2684eb7eeb88SPaolo Bonzini     if (is_write) {
2685eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2686eb7eeb88SPaolo Bonzini     } else {
2687eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2688ac1970fbSAvi Kivity     }
2689ac1970fbSAvi Kivity }
2690ac1970fbSAvi Kivity 
2691a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2692ac1970fbSAvi Kivity                             int len, int is_write)
2693ac1970fbSAvi Kivity {
26945c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
26955c9eb028SPeter Maydell                      buf, len, is_write);
2696ac1970fbSAvi Kivity }
2697ac1970fbSAvi Kivity 
/* Operation selector for cpu_physical_memory_write_rom_internal(). */
enum write_rom_type {
    WRITE_DATA,   /* copy the caller's buffer into the range */
    FLUSH_CACHE,  /* flush the host instruction cache over the range */
};
2702582b55a9SAlexander Graf 
/* Walk [addr, addr+len) in @as and, for each RAM or ROM-device fragment,
 * either copy from @buf (WRITE_DATA) or flush the host icache
 * (FLUSH_CACHE).  Fragments that are neither RAM nor ROMD are skipped.
 * Unlike address_space_write() this writes even into ROM, which is why
 * it is used for ROM loading.
 */
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* Not RAM/ROMD: nothing to write, just advance by the size
             * the region would have accepted. */
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
2739d0ecd2aaSbellard 
2740582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
27412a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2742582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2743582b55a9SAlexander Graf {
27442a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2745582b55a9SAlexander Graf }
2746582b55a9SAlexander Graf 
2747582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2748582b55a9SAlexander Graf {
2749582b55a9SAlexander Graf     /*
2750582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2751582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2752582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2753582b55a9SAlexander Graf      * the host's instruction cache at least.
2754582b55a9SAlexander Graf      */
2755582b55a9SAlexander Graf     if (tcg_enabled()) {
2756582b55a9SAlexander Graf         return;
2757582b55a9SAlexander Graf     }
2758582b55a9SAlexander Graf 
27592a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
27602a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2761582b55a9SAlexander Graf }
2762582b55a9SAlexander Graf 
/* Fallback buffer used by address_space_map() when the target cannot be
 * mapped directly (e.g. MMIO).  Only one mapping may use it at a time;
 * in_use is claimed with atomic_xchg in address_space_map().
 */
typedef struct {
    MemoryRegion *mr;  /* region being shadowed (holds a reference) */
    void *buffer;      /* host allocation, freed in address_space_unmap() */
    hwaddr addr;       /* guest address the buffer stands in for */
    hwaddr len;        /* length of the bounced range */
    bool in_use;       /* claimed flag, accessed with atomic ops */
} BounceBuffer;

static BounceBuffer bounce;
27726d16c2f8Saliguori 
/* A client waiting for the bounce buffer to become free; its bottom half
 * is scheduled from cpu_notify_map_clients_locked().
 */
typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* Protects map_client_list below. */
QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
2781ba223c29Saliguori 
2782e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2783ba223c29Saliguori {
278472cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
27857267c094SAnthony Liguori     g_free(client);
2786ba223c29Saliguori }
2787ba223c29Saliguori 
278833b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2789ba223c29Saliguori {
2790ba223c29Saliguori     MapClient *client;
2791ba223c29Saliguori 
279272cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
279372cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2794e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2795e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2796ba223c29Saliguori     }
2797ba223c29Saliguori }
2798ba223c29Saliguori 
2799e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2800d0ecd2aaSbellard {
2801d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2802d0ecd2aaSbellard 
280338e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2804e95205e1SFam Zheng     client->bh = bh;
2805d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
280633b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
280733b6c2edSFam Zheng         cpu_notify_map_clients_locked();
280833b6c2edSFam Zheng     }
280938e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2810d0ecd2aaSbellard }
2811d0ecd2aaSbellard 
/* One-time global initialization of the memory subsystem.
 * Note the ordering: io_mem_init() runs before memory_map_init(), which
 * creates the global address spaces.
 */
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
281938e047b5SFam Zheng 
2820e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2821d0ecd2aaSbellard {
2822e95205e1SFam Zheng     MapClient *client;
2823d0ecd2aaSbellard 
2824e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2825e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2826e95205e1SFam Zheng         if (client->bh == bh) {
2827e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2828e95205e1SFam Zheng             break;
2829e95205e1SFam Zheng         }
2830e95205e1SFam Zheng     }
2831e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2832d0ecd2aaSbellard }
2833d0ecd2aaSbellard 
2834d0ecd2aaSbellard static void cpu_notify_map_clients(void)
2835d0ecd2aaSbellard {
283638e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
283733b6c2edSFam Zheng     cpu_notify_map_clients_locked();
283838e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
28396d16c2f8Saliguori }
28406d16c2f8Saliguori 
284151644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
284251644ab7SPaolo Bonzini {
28435c8a00ceSPaolo Bonzini     MemoryRegion *mr;
284451644ab7SPaolo Bonzini     hwaddr l, xlat;
284551644ab7SPaolo Bonzini 
284641063e1eSPaolo Bonzini     rcu_read_lock();
284751644ab7SPaolo Bonzini     while (len > 0) {
284851644ab7SPaolo Bonzini         l = len;
28495c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
28505c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
28515c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
28525c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
285351644ab7SPaolo Bonzini                 return false;
285451644ab7SPaolo Bonzini             }
285551644ab7SPaolo Bonzini         }
285651644ab7SPaolo Bonzini 
285751644ab7SPaolo Bonzini         len -= l;
285851644ab7SPaolo Bonzini         addr += l;
285951644ab7SPaolo Bonzini     }
286041063e1eSPaolo Bonzini     rcu_read_unlock();
286151644ab7SPaolo Bonzini     return true;
286251644ab7SPaolo Bonzini }
286351644ab7SPaolo Bonzini 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        /* Not directly mappable (e.g. MMIO): fall back to the single
         * global bounce buffer.  Fail if someone else holds it. */
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        /* Keep the region alive while the mapping is outstanding. */
        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Read mapping: pre-fill the buffer with the current data. */
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    /* Direct case: extend the mapping over as many contiguous fragments
     * of the same region as possible. */
    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            /* Region change or discontiguity: stop here. */
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
29396d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * Two cases, distinguished by comparing @buffer against the global
 * bounce buffer:
 *  - direct mapping: @buffer points into host RAM backing a
 *    MemoryRegion; translate it back, update dirty state, and drop the
 *    reference taken by address_space_map().
 *  - bounce buffer: write back the caller's data (if is_write), free
 *    the buffer, and release it for the next mapper.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct-mapped case: recover the MemoryRegion and the offset
         * within it from the host pointer. */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            /* Mark only the bytes actually written as dirty (and
             * invalidate any TBs covering them). */
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            /* Under Xen the pointer came from the map cache, not
             * directly from guest RAM; drop that cache entry. */
            xen_invalidate_map_cache_entry(buffer);
        }
        /* Drop the reference address_space_map() took on the region. */
        memory_region_unref(mr);
        return;
    }
    /* Bounce-buffer case: for writes, push the caller's data out to
     * the original address via the normal write path. */
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    /* Release the bounce buffer with a memory barrier before notifying
     * any clients waiting to map. */
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
2972d0ecd2aaSbellard 
2973a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2974a8170e5eSAvi Kivity                               hwaddr *plen,
2975ac1970fbSAvi Kivity                               int is_write)
2976ac1970fbSAvi Kivity {
2977ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2978ac1970fbSAvi Kivity }
2979ac1970fbSAvi Kivity 
2980a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2981a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2982ac1970fbSAvi Kivity {
2983ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2984ac1970fbSAvi Kivity }
2985ac1970fbSAvi Kivity 
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical address @addr in @as.
 *
 * The address is translated once; if the target is not directly
 * accessible RAM, or the translated span is shorter than 4 bytes, the
 * access is dispatched to the region as MMIO, otherwise the value is
 * read straight out of host RAM.  @endian selects the stored byte
 * order.  The transaction result is stored in *@result when @result
 * is non-NULL.  The whole access runs inside an RCU read-side
 * critical section.
 */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* prepare_mmio_access() may take the iothread lock; remember
         * whether we must drop it on the way out. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
        /* Dispatch returns the value in target-native order; swap if
         * the caller asked for the other endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case: compute the host pointer and load directly. */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
30438df1cd07Sbellard 
304450013115SPeter Maydell uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
304550013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
304650013115SPeter Maydell {
304750013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
304850013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
304950013115SPeter Maydell }
305050013115SPeter Maydell 
305150013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
305250013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
305350013115SPeter Maydell {
305450013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
305550013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
305650013115SPeter Maydell }
305750013115SPeter Maydell 
305850013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
305950013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
306050013115SPeter Maydell {
306150013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
306250013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
306350013115SPeter Maydell }
306450013115SPeter Maydell 
3065fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
30661e78bcc1SAlexander Graf {
306750013115SPeter Maydell     return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30681e78bcc1SAlexander Graf }
30691e78bcc1SAlexander Graf 
3070fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
30711e78bcc1SAlexander Graf {
307250013115SPeter Maydell     return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30731e78bcc1SAlexander Graf }
30741e78bcc1SAlexander Graf 
3075fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
30761e78bcc1SAlexander Graf {
307750013115SPeter Maydell     return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30781e78bcc1SAlexander Graf }
30791e78bcc1SAlexander Graf 
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical address @addr in @as.
 *
 * Mirrors address_space_ldl_internal() but for 8-byte accesses: one
 * translation, then either an MMIO dispatch (with a byte swap when the
 * requested endianness differs from target-native) or a direct read
 * from host RAM.  *@result receives the transaction result when
 * @result is non-NULL.
 */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* MMIO path; may need the iothread lock released later. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
        /* Dispatch produces target-native order; swap on mismatch. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case: read directly through the host pointer. */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
313884b7b8e7Sbellard 
313950013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
314050013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
314150013115SPeter Maydell {
314250013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
314350013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
314450013115SPeter Maydell }
314550013115SPeter Maydell 
314650013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
314750013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
314850013115SPeter Maydell {
314950013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
315050013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
315150013115SPeter Maydell }
315250013115SPeter Maydell 
315350013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
315450013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
315550013115SPeter Maydell {
315650013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
315750013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
315850013115SPeter Maydell }
315950013115SPeter Maydell 
31602c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
31611e78bcc1SAlexander Graf {
316250013115SPeter Maydell     return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31631e78bcc1SAlexander Graf }
31641e78bcc1SAlexander Graf 
31652c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
31661e78bcc1SAlexander Graf {
316750013115SPeter Maydell     return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31681e78bcc1SAlexander Graf }
31691e78bcc1SAlexander Graf 
31702c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
31711e78bcc1SAlexander Graf {
317250013115SPeter Maydell     return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31731e78bcc1SAlexander Graf }
31741e78bcc1SAlexander Graf 
3175aab33094Sbellard /* XXX: optimize */
317650013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
317750013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
3178aab33094Sbellard {
3179aab33094Sbellard     uint8_t val;
318050013115SPeter Maydell     MemTxResult r;
318150013115SPeter Maydell 
318250013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
318350013115SPeter Maydell     if (result) {
318450013115SPeter Maydell         *result = r;
318550013115SPeter Maydell     }
3186aab33094Sbellard     return val;
3187aab33094Sbellard }
3188aab33094Sbellard 
318950013115SPeter Maydell uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
319050013115SPeter Maydell {
319150013115SPeter Maydell     return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
319250013115SPeter Maydell }
319350013115SPeter Maydell 
/* warning: addr must be aligned */
/* Load a 16-bit value from guest physical address @addr in @as.
 *
 * Same structure as the 32-bit and 64-bit loaders: translate once,
 * then either dispatch to the region as MMIO (swapping bytes when the
 * requested endianness differs from target-native) or read directly
 * from host RAM.  *@result receives the transaction result when
 * @result is non-NULL.
 */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* MMIO path; may need the iothread lock released later. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
        /* Dispatch produces target-native order; swap on mismatch. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case: read directly through the host pointer. */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3253aab33094Sbellard 
325450013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
325550013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
325650013115SPeter Maydell {
325750013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
325850013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
325950013115SPeter Maydell }
326050013115SPeter Maydell 
326150013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
326250013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
326350013115SPeter Maydell {
326450013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
326550013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
326650013115SPeter Maydell }
326750013115SPeter Maydell 
326850013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
326950013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
327050013115SPeter Maydell {
327150013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
327250013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
327350013115SPeter Maydell }
327450013115SPeter Maydell 
327541701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
32761e78bcc1SAlexander Graf {
327750013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32781e78bcc1SAlexander Graf }
32791e78bcc1SAlexander Graf 
328041701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
32811e78bcc1SAlexander Graf {
328250013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32831e78bcc1SAlexander Graf }
32841e78bcc1SAlexander Graf 
328541701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
32861e78bcc1SAlexander Graf {
328750013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32881e78bcc1SAlexander Graf }
32891e78bcc1SAlexander Graf 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
/* Store a 32-bit target-native value without flagging the page in the
 * DIRTY_MEMORY_CODE bitmap, so translated blocks covering the page are
 * NOT invalidated.  All other dirty bitmaps are still updated.
 */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* MMIO path; may need the iothread lock released later. */
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case: store directly through the host pointer. */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* Record the write in every dirty bitmap except
         * DIRTY_MEMORY_CODE, deliberately skipping TB invalidation. */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
33298df1cd07Sbellard 
333050013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
333150013115SPeter Maydell {
333250013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
333350013115SPeter Maydell }
333450013115SPeter Maydell 
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical address @addr in @as.
 *
 * Translate once; non-RAM (or too-short) targets go through the MMIO
 * dispatch path, after swapping @val into target-native order if
 * needed.  Direct RAM stores use the endianness-specific store helper
 * and then mark the range dirty (invalidating any TBs over it).
 * *@result receives the transaction result when @result is non-NULL.
 */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* MMIO path; may need the iothread lock released later. */
        release_lock |= prepare_mmio_access(mr);

        /* Dispatch expects target-native order; swap on mismatch. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case: store directly through the host pointer. */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        /* Unlike the _notdirty variant, mark all dirty bitmaps
         * including DIRTY_MEMORY_CODE. */
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
339150013115SPeter Maydell 
339250013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
339350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
339450013115SPeter Maydell {
339550013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
339650013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
339750013115SPeter Maydell }
339850013115SPeter Maydell 
339950013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
340050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
340150013115SPeter Maydell {
340250013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
340350013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
340450013115SPeter Maydell }
340550013115SPeter Maydell 
340650013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
340750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
340850013115SPeter Maydell {
340950013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
341050013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
34113a7d929eSbellard }
34128df1cd07Sbellard 
3413ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34141e78bcc1SAlexander Graf {
341550013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34161e78bcc1SAlexander Graf }
34171e78bcc1SAlexander Graf 
3418ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34191e78bcc1SAlexander Graf {
342050013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34211e78bcc1SAlexander Graf }
34221e78bcc1SAlexander Graf 
3423ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34241e78bcc1SAlexander Graf {
342550013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34261e78bcc1SAlexander Graf }
34271e78bcc1SAlexander Graf 
3428aab33094Sbellard /* XXX: optimize */
342950013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
343050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3431aab33094Sbellard {
3432aab33094Sbellard     uint8_t v = val;
343350013115SPeter Maydell     MemTxResult r;
343450013115SPeter Maydell 
343550013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
343650013115SPeter Maydell     if (result) {
343750013115SPeter Maydell         *result = r;
343850013115SPeter Maydell     }
343950013115SPeter Maydell }
344050013115SPeter Maydell 
344150013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
344250013115SPeter Maydell {
344350013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3444aab33094Sbellard }
3445aab33094Sbellard 
/* warning: addr must be aligned */
/* Store a 16-bit value to guest physical address @addr in @as.
 *
 * Same structure as the 32-bit store: translate once, then either
 * dispatch to the region as MMIO (swapping @val into target-native
 * order if needed) or store directly into host RAM and mark the range
 * dirty.  *@result receives the transaction result when @result is
 * non-NULL.
 */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* MMIO path; may need the iothread lock released later. */
        release_lock |= prepare_mmio_access(mr);

        /* Dispatch expects target-native order; swap on mismatch. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: store directly through the host pointer. */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Update dirty bitmaps and invalidate any TBs over the range. */
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
350150013115SPeter Maydell 
350250013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
350350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
350450013115SPeter Maydell {
350550013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
350650013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
350750013115SPeter Maydell }
350850013115SPeter Maydell 
350950013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
351050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
351150013115SPeter Maydell {
351250013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
351350013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
351450013115SPeter Maydell }
351550013115SPeter Maydell 
351650013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
351750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
351850013115SPeter Maydell {
351950013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
352050013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3521aab33094Sbellard }
3522aab33094Sbellard 
35235ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35241e78bcc1SAlexander Graf {
352550013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35261e78bcc1SAlexander Graf }
35271e78bcc1SAlexander Graf 
35285ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35291e78bcc1SAlexander Graf {
353050013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35311e78bcc1SAlexander Graf }
35321e78bcc1SAlexander Graf 
35335ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35341e78bcc1SAlexander Graf {
353550013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35361e78bcc1SAlexander Graf }
35371e78bcc1SAlexander Graf 
3538aab33094Sbellard /* XXX: optimize */
353950013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
354050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
354150013115SPeter Maydell {
354250013115SPeter Maydell     MemTxResult r;
354350013115SPeter Maydell     val = tswap64(val);
354450013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
354550013115SPeter Maydell     if (result) {
354650013115SPeter Maydell         *result = r;
354750013115SPeter Maydell     }
354850013115SPeter Maydell }
354950013115SPeter Maydell 
355050013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
355150013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
355250013115SPeter Maydell {
355350013115SPeter Maydell     MemTxResult r;
355450013115SPeter Maydell     val = cpu_to_le64(val);
355550013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
355650013115SPeter Maydell     if (result) {
355750013115SPeter Maydell         *result = r;
355850013115SPeter Maydell     }
355950013115SPeter Maydell }
356050013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
356150013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
356250013115SPeter Maydell {
356350013115SPeter Maydell     MemTxResult r;
356450013115SPeter Maydell     val = cpu_to_be64(val);
356550013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
356650013115SPeter Maydell     if (result) {
356750013115SPeter Maydell         *result = r;
356850013115SPeter Maydell     }
356950013115SPeter Maydell }
357050013115SPeter Maydell 
3571f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3572aab33094Sbellard {
357350013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3574aab33094Sbellard }
3575aab33094Sbellard 
3576f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35771e78bcc1SAlexander Graf {
357850013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35791e78bcc1SAlexander Graf }
35801e78bcc1SAlexander Graf 
3581f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35821e78bcc1SAlexander Graf {
358350013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35841e78bcc1SAlexander Graf }
35851e78bcc1SAlexander Graf 
35865e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3587f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3588b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
358913eb76e0Sbellard {
359013eb76e0Sbellard     int l;
3591a8170e5eSAvi Kivity     hwaddr phys_addr;
35929b3c35e0Sj_mayer     target_ulong page;
359313eb76e0Sbellard 
359413eb76e0Sbellard     while (len > 0) {
35955232e4c7SPeter Maydell         int asidx;
35965232e4c7SPeter Maydell         MemTxAttrs attrs;
35975232e4c7SPeter Maydell 
359813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
35995232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
36005232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
360113eb76e0Sbellard         /* if no physical page mapped, return an error */
360213eb76e0Sbellard         if (phys_addr == -1)
360313eb76e0Sbellard             return -1;
360413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
360513eb76e0Sbellard         if (l > len)
360613eb76e0Sbellard             l = len;
36075e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
36082e38847bSEdgar E. Iglesias         if (is_write) {
36095232e4c7SPeter Maydell             cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
36105232e4c7SPeter Maydell                                           phys_addr, buf, l);
36112e38847bSEdgar E. Iglesias         } else {
36125232e4c7SPeter Maydell             address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
36135232e4c7SPeter Maydell                              MEMTXATTRS_UNSPECIFIED,
36145c9eb028SPeter Maydell                              buf, l, 0);
36152e38847bSEdgar E. Iglesias         }
361613eb76e0Sbellard         len -= l;
361713eb76e0Sbellard         buf += l;
361813eb76e0Sbellard         addr += l;
361913eb76e0Sbellard     }
362013eb76e0Sbellard     return 0;
362113eb76e0Sbellard }
3622038629a6SDr. David Alan Gilbert 
3623038629a6SDr. David Alan Gilbert /*
3624038629a6SDr. David Alan Gilbert  * Allows code that needs to deal with migration bitmaps etc to still be built
3625038629a6SDr. David Alan Gilbert  * target independent.
3626038629a6SDr. David Alan Gilbert  */
3627038629a6SDr. David Alan Gilbert size_t qemu_target_page_bits(void)
3628038629a6SDr. David Alan Gilbert {
3629038629a6SDr. David Alan Gilbert     return TARGET_PAGE_BITS;
3630038629a6SDr. David Alan Gilbert }
3631038629a6SDr. David Alan Gilbert 
3632a68fe89cSPaul Brook #endif
363313eb76e0Sbellard 
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
    /* Compile-time property of the target, surfaced at runtime. */
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
36478e4a424bSBlue Swirl 
364876f35538SWen Congyang #ifndef CONFIG_USER_ONLY
3649a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
365076f35538SWen Congyang {
36515c8a00ceSPaolo Bonzini     MemoryRegion*mr;
3652149f54b5SPaolo Bonzini     hwaddr l = 1;
365341063e1eSPaolo Bonzini     bool res;
365476f35538SWen Congyang 
365541063e1eSPaolo Bonzini     rcu_read_lock();
36565c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3657149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
365876f35538SWen Congyang 
365941063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
366041063e1eSPaolo Bonzini     rcu_read_unlock();
366141063e1eSPaolo Bonzini     return res;
366276f35538SWen Congyang }
3663bd2fa51fSMichael R. Hines 
3664e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3665bd2fa51fSMichael R. Hines {
3666bd2fa51fSMichael R. Hines     RAMBlock *block;
3667e3807054SDr. David Alan Gilbert     int ret = 0;
3668bd2fa51fSMichael R. Hines 
36690dc3f44aSMike Day     rcu_read_lock();
36700dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3671e3807054SDr. David Alan Gilbert         ret = func(block->idstr, block->host, block->offset,
3672e3807054SDr. David Alan Gilbert                    block->used_length, opaque);
3673e3807054SDr. David Alan Gilbert         if (ret) {
3674e3807054SDr. David Alan Gilbert             break;
3675e3807054SDr. David Alan Gilbert         }
3676bd2fa51fSMichael R. Hines     }
36770dc3f44aSMike Day     rcu_read_unlock();
3678e3807054SDr. David Alan Gilbert     return ret;
3679bd2fa51fSMichael R. Hines }
3680ec3f8c99SPeter Maydell #endif
3681