xref: /qemu/system/physmem.c (revision 7bbc124e7e8fb544288ccd1f5185643a7d0554b8)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
197b31bbc2SPeter Maydell #include "qemu/osdep.h"
20da34e65cSMarkus Armbruster #include "qapi/error.h"
2354936004Sbellard 
24f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
256180a181Sbellard #include "cpu.h"
2663c91552SPaolo Bonzini #include "exec/exec-all.h"
27b67d9a52Sbellard #include "tcg.h"
28741da0d3SPaolo Bonzini #include "hw/qdev-core.h"
294485bd26SMichael S. Tsirkin #if !defined(CONFIG_USER_ONLY)
3047c8ca53SMarcel Apfelbaum #include "hw/boards.h"
3133c11879SPaolo Bonzini #include "hw/xen/xen.h"
324485bd26SMichael S. Tsirkin #endif
339c17d615SPaolo Bonzini #include "sysemu/kvm.h"
342ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
351de7afc9SPaolo Bonzini #include "qemu/timer.h"
361de7afc9SPaolo Bonzini #include "qemu/config-file.h"
3775a34036SAndreas Färber #include "qemu/error-report.h"
3853a5960aSpbrook #if defined(CONFIG_USER_ONLY)
39a9c94277SMarkus Armbruster #include "qemu.h"
40432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
41741da0d3SPaolo Bonzini #include "hw/hw.h"
42741da0d3SPaolo Bonzini #include "exec/memory.h"
43df43d49cSPaolo Bonzini #include "exec/ioport.h"
44741da0d3SPaolo Bonzini #include "sysemu/dma.h"
45741da0d3SPaolo Bonzini #include "exec/address-spaces.h"
469c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
476506e4f9SStefano Stabellini #include "trace.h"
4853a5960aSpbrook #endif
490d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
500dc3f44aSMike Day #include "qemu/rcu_queue.h"
514840f10eSJan Kiszka #include "qemu/main-loop.h"
525b6dd868SBlue Swirl #include "translate-all.h"
537615936eSPavel Dovgalyuk #include "sysemu/replay.h"
540cac1b66SBlue Swirl 
55022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
56220c3ebdSJuan Quintela #include "exec/ram_addr.h"
57508127e2SPaolo Bonzini #include "exec/log.h"
5867d95c15SAvi Kivity 
599dfeca7cSBharata B Rao #include "migration/vmstate.h"
609dfeca7cSBharata B Rao 
61b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
62794e8f30SMichael S. Tsirkin #ifndef _WIN32
63794e8f30SMichael S. Tsirkin #include "qemu/mmap-alloc.h"
64794e8f30SMichael S. Tsirkin #endif
65b35ba30fSMichael S. Tsirkin 
66db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
671196be37Sths 
6899773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
690dc3f44aSMike Day /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
700dc3f44aSMike Day  * are protected by the ramlist lock.
710dc3f44aSMike Day  */
720d53d9feSMike Day RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
7362152b8aSAvi Kivity 
7462152b8aSAvi Kivity static MemoryRegion *system_memory;
75309cb471SAvi Kivity static MemoryRegion *system_io;
7662152b8aSAvi Kivity 
77f6790af6SAvi Kivity AddressSpace address_space_io;
78f6790af6SAvi Kivity AddressSpace address_space_memory;
792673a5daSAvi Kivity 
800844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
81acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
820e0df1e2SAvi Kivity 
837bd4f430SPaolo Bonzini /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
847bd4f430SPaolo Bonzini #define RAM_PREALLOC   (1 << 0)
857bd4f430SPaolo Bonzini 
86dbcb8981SPaolo Bonzini /* RAM is mmap-ed with MAP_SHARED */
87dbcb8981SPaolo Bonzini #define RAM_SHARED     (1 << 1)
88dbcb8981SPaolo Bonzini 
8962be4e3aSMichael S. Tsirkin /* Only a portion of RAM (used_length) is actually used, and migrated.
9062be4e3aSMichael S. Tsirkin  * This used_length size can change across reboots.
9162be4e3aSMichael S. Tsirkin  */
9262be4e3aSMichael S. Tsirkin #define RAM_RESIZEABLE (1 << 2)
9362be4e3aSMichael S. Tsirkin 
94e2eef170Spbrook #endif
959fa3e853Sbellard 
96bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
976a00d601Sbellard /* current CPU in the current thread. It is only valid inside
986a00d601Sbellard    cpu_exec() */
99f240eb6fSPaolo Bonzini __thread CPUState *current_cpu;
1002e70f6efSpbrook /* 0 = Do not count executed instructions.
101bf20dc07Sths    1 = Precise instruction counting.
1022e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1035708fc66SPaolo Bonzini int use_icount;
1046a00d601Sbellard 
105e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1064346ae3eSAvi Kivity 
1071db8abb1SPaolo Bonzini typedef struct PhysPageEntry PhysPageEntry;
1081db8abb1SPaolo Bonzini 
1091db8abb1SPaolo Bonzini struct PhysPageEntry {
1109736e55bSMichael S. Tsirkin     /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
1118b795765SMichael S. Tsirkin     uint32_t skip : 6;
1129736e55bSMichael S. Tsirkin     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
1138b795765SMichael S. Tsirkin     uint32_t ptr : 26;
1141db8abb1SPaolo Bonzini };
1151db8abb1SPaolo Bonzini 
1168b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
1178b795765SMichael S. Tsirkin 
11803f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
11957271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
12003f49957SPaolo Bonzini 
121026736ceSMichael S. Tsirkin #define P_L2_BITS 9
12203f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
12303f49957SPaolo Bonzini 
12403f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
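
/* Worked example, assuming a target with 4 KiB pages (TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, so a lookup walks at most six
 * 512-entry tables from the root down to a MemoryRegionSection leaf.
 */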
12503f49957SPaolo Bonzini 
12603f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1270475d94fSPaolo Bonzini 
12853cb28cbSMarcel Apfelbaum typedef struct PhysPageMap {
12979e2b9aeSPaolo Bonzini     struct rcu_head rcu;
13079e2b9aeSPaolo Bonzini 
13153cb28cbSMarcel Apfelbaum     unsigned sections_nb;
13253cb28cbSMarcel Apfelbaum     unsigned sections_nb_alloc;
13353cb28cbSMarcel Apfelbaum     unsigned nodes_nb;
13453cb28cbSMarcel Apfelbaum     unsigned nodes_nb_alloc;
13553cb28cbSMarcel Apfelbaum     Node *nodes;
13653cb28cbSMarcel Apfelbaum     MemoryRegionSection *sections;
13753cb28cbSMarcel Apfelbaum } PhysPageMap;
13853cb28cbSMarcel Apfelbaum 
1391db8abb1SPaolo Bonzini struct AddressSpaceDispatch {
14079e2b9aeSPaolo Bonzini     struct rcu_head rcu;
14179e2b9aeSPaolo Bonzini 
142729633c2SFam Zheng     MemoryRegionSection *mru_section;
1431db8abb1SPaolo Bonzini     /* This is a multi-level map on the physical address space.
1441db8abb1SPaolo Bonzini      * The bottom level has pointers to MemoryRegionSections.
1451db8abb1SPaolo Bonzini      */
1461db8abb1SPaolo Bonzini     PhysPageEntry phys_map;
14753cb28cbSMarcel Apfelbaum     PhysPageMap map;
148acc9d80bSJan Kiszka     AddressSpace *as;
1491db8abb1SPaolo Bonzini };
1501db8abb1SPaolo Bonzini 
15190260c6cSJan Kiszka #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
15290260c6cSJan Kiszka typedef struct subpage_t {
15390260c6cSJan Kiszka     MemoryRegion iomem;
154acc9d80bSJan Kiszka     AddressSpace *as;
15590260c6cSJan Kiszka     hwaddr base;
15690260c6cSJan Kiszka     uint16_t sub_section[TARGET_PAGE_SIZE];
15790260c6cSJan Kiszka } subpage_t;
15890260c6cSJan Kiszka 
159b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
160b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
161b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
162b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1635312bd8bSAvi Kivity 
164e2eef170Spbrook static void io_mem_init(void);
16562152b8aSAvi Kivity static void memory_map_init(void);
16609daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
167e2eef170Spbrook 
1681ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
16932857f4dSPeter Maydell 
17032857f4dSPeter Maydell /**
17132857f4dSPeter Maydell  * CPUAddressSpace: all the information a CPU needs about an AddressSpace
17232857f4dSPeter Maydell  * @cpu: the CPU whose AddressSpace this is
17332857f4dSPeter Maydell  * @as: the AddressSpace itself
17432857f4dSPeter Maydell  * @memory_dispatch: its dispatch pointer (cached, RCU protected)
17532857f4dSPeter Maydell  * @tcg_as_listener: listener for tracking changes to the AddressSpace
17632857f4dSPeter Maydell  */
17732857f4dSPeter Maydell struct CPUAddressSpace {
17832857f4dSPeter Maydell     CPUState *cpu;
17932857f4dSPeter Maydell     AddressSpace *as;
18032857f4dSPeter Maydell     struct AddressSpaceDispatch *memory_dispatch;
18132857f4dSPeter Maydell     MemoryListener tcg_as_listener;
18232857f4dSPeter Maydell };
18332857f4dSPeter Maydell 
1846658ffb8Spbrook #endif
18554936004Sbellard 
1866d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
187d6f2ea22SAvi Kivity 
18853cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
189f7bf5461SAvi Kivity {
190101420b8SPeter Lieven     static unsigned alloc_hint = 16;
19153cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
192101420b8SPeter Lieven         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
19353cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
19453cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
195101420b8SPeter Lieven         alloc_hint = map->nodes_nb_alloc;
196f7bf5461SAvi Kivity     }
197f7bf5461SAvi Kivity }
198f7bf5461SAvi Kivity 
199db94604bSPaolo Bonzini static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
200d6f2ea22SAvi Kivity {
201d6f2ea22SAvi Kivity     unsigned i;
2028b795765SMichael S. Tsirkin     uint32_t ret;
203db94604bSPaolo Bonzini     PhysPageEntry e;
204db94604bSPaolo Bonzini     PhysPageEntry *p;
205d6f2ea22SAvi Kivity 
20653cb28cbSMarcel Apfelbaum     ret = map->nodes_nb++;
207db94604bSPaolo Bonzini     p = map->nodes[ret];
208d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
20953cb28cbSMarcel Apfelbaum     assert(ret != map->nodes_nb_alloc);
210db94604bSPaolo Bonzini 
211db94604bSPaolo Bonzini     e.skip = leaf ? 0 : 1;
212db94604bSPaolo Bonzini     e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
21303f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
214db94604bSPaolo Bonzini         memcpy(&p[i], &e, sizeof(e));
215d6f2ea22SAvi Kivity     }
216f7bf5461SAvi Kivity     return ret;
217d6f2ea22SAvi Kivity }
218d6f2ea22SAvi Kivity 
21953cb28cbSMarcel Apfelbaum static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
22053cb28cbSMarcel Apfelbaum                                 hwaddr *index, hwaddr *nb, uint16_t leaf,
2212999097bSAvi Kivity                                 int level)
22292e873b9Sbellard {
223f7bf5461SAvi Kivity     PhysPageEntry *p;
22403f49957SPaolo Bonzini     hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
2255cd2c5b6SRichard Henderson 
2269736e55bSMichael S. Tsirkin     if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
227db94604bSPaolo Bonzini         lp->ptr = phys_map_node_alloc(map, level == 0);
228db94604bSPaolo Bonzini     }
22953cb28cbSMarcel Apfelbaum     p = map->nodes[lp->ptr];
23003f49957SPaolo Bonzini     lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
231f7bf5461SAvi Kivity 
23203f49957SPaolo Bonzini     while (*nb && lp < &p[P_L2_SIZE]) {
23307f07b31SAvi Kivity         if ((*index & (step - 1)) == 0 && *nb >= step) {
2349736e55bSMichael S. Tsirkin             lp->skip = 0;
235c19e8800SAvi Kivity             lp->ptr = leaf;
23607f07b31SAvi Kivity             *index += step;
23707f07b31SAvi Kivity             *nb -= step;
238f7bf5461SAvi Kivity         } else {
23953cb28cbSMarcel Apfelbaum             phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2402999097bSAvi Kivity         }
2412999097bSAvi Kivity         ++lp;
242f7bf5461SAvi Kivity     }
2434346ae3eSAvi Kivity }
2445cd2c5b6SRichard Henderson 
245ac1970fbSAvi Kivity static void phys_page_set(AddressSpaceDispatch *d,
246a8170e5eSAvi Kivity                           hwaddr index, hwaddr nb,
2472999097bSAvi Kivity                           uint16_t leaf)
248f7bf5461SAvi Kivity {
2492999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
25053cb28cbSMarcel Apfelbaum     phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
251f7bf5461SAvi Kivity 
25253cb28cbSMarcel Apfelbaum     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
25392e873b9Sbellard }
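
/* Example, assuming 4 KiB pages: registering a 2 MiB region aligned to
 * 2 MiB consumes a single level-1 entry.  At that level step == P_L2_SIZE
 * pages, so the "(*index & (step - 1)) == 0 && *nb >= step" test in
 * phys_page_set_level() succeeds and one entry with skip == 0 stands in
 * for all 512 leaf pages.
 */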
25492e873b9Sbellard 
255b35ba30fSMichael S. Tsirkin /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
256b35ba30fSMichael S. Tsirkin  * and update our entry so we can skip it and go directly to the destination.
257b35ba30fSMichael S. Tsirkin  */
258efee678dSMarc-André Lureau static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
259b35ba30fSMichael S. Tsirkin {
260b35ba30fSMichael S. Tsirkin     unsigned valid_ptr = P_L2_SIZE;
261b35ba30fSMichael S. Tsirkin     int valid = 0;
262b35ba30fSMichael S. Tsirkin     PhysPageEntry *p;
263b35ba30fSMichael S. Tsirkin     int i;
264b35ba30fSMichael S. Tsirkin 
265b35ba30fSMichael S. Tsirkin     if (lp->ptr == PHYS_MAP_NODE_NIL) {
266b35ba30fSMichael S. Tsirkin         return;
267b35ba30fSMichael S. Tsirkin     }
268b35ba30fSMichael S. Tsirkin 
269b35ba30fSMichael S. Tsirkin     p = nodes[lp->ptr];
270b35ba30fSMichael S. Tsirkin     for (i = 0; i < P_L2_SIZE; i++) {
271b35ba30fSMichael S. Tsirkin         if (p[i].ptr == PHYS_MAP_NODE_NIL) {
272b35ba30fSMichael S. Tsirkin             continue;
273b35ba30fSMichael S. Tsirkin         }
274b35ba30fSMichael S. Tsirkin 
275b35ba30fSMichael S. Tsirkin         valid_ptr = i;
276b35ba30fSMichael S. Tsirkin         valid++;
277b35ba30fSMichael S. Tsirkin         if (p[i].skip) {
278efee678dSMarc-André Lureau             phys_page_compact(&p[i], nodes);
279b35ba30fSMichael S. Tsirkin         }
280b35ba30fSMichael S. Tsirkin     }
281b35ba30fSMichael S. Tsirkin 
282b35ba30fSMichael S. Tsirkin     /* We can only compress if there's only one child. */
283b35ba30fSMichael S. Tsirkin     if (valid != 1) {
284b35ba30fSMichael S. Tsirkin         return;
285b35ba30fSMichael S. Tsirkin     }
286b35ba30fSMichael S. Tsirkin 
287b35ba30fSMichael S. Tsirkin     assert(valid_ptr < P_L2_SIZE);
288b35ba30fSMichael S. Tsirkin 
289b35ba30fSMichael S. Tsirkin     /* Don't compress if it won't fit in the # of bits we have. */
290b35ba30fSMichael S. Tsirkin     if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
291b35ba30fSMichael S. Tsirkin         return;
292b35ba30fSMichael S. Tsirkin     }
293b35ba30fSMichael S. Tsirkin 
294b35ba30fSMichael S. Tsirkin     lp->ptr = p[valid_ptr].ptr;
295b35ba30fSMichael S. Tsirkin     if (!p[valid_ptr].skip) {
296b35ba30fSMichael S. Tsirkin         /* If our only child is a leaf, make this a leaf. */
297b35ba30fSMichael S. Tsirkin         /* By design, we should have made this node a leaf to begin with so we
298b35ba30fSMichael S. Tsirkin          * should never reach here.
299b35ba30fSMichael S. Tsirkin          * But since it's so simple to handle this, let's do it just in case we
300b35ba30fSMichael S. Tsirkin          * change this rule.
301b35ba30fSMichael S. Tsirkin          */
302b35ba30fSMichael S. Tsirkin         lp->skip = 0;
303b35ba30fSMichael S. Tsirkin     } else {
304b35ba30fSMichael S. Tsirkin         lp->skip += p[valid_ptr].skip;
305b35ba30fSMichael S. Tsirkin     }
306b35ba30fSMichael S. Tsirkin }
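
/* Compaction example: a chain of interior nodes that each have a single
 * valid child collapses into one entry whose skip counts the levels
 * jumped, so phys_page_find() consumes the whole chain in one iteration.
 * Because the index bits of the skipped levels are no longer checked, an
 * address that would have hit a NIL entry can now reach the wrong leaf;
 * the section_covers_addr() test in phys_page_find() catches that case
 * and falls back to PHYS_SECTION_UNASSIGNED.
 */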
307b35ba30fSMichael S. Tsirkin 
308b35ba30fSMichael S. Tsirkin static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
309b35ba30fSMichael S. Tsirkin {
310b35ba30fSMichael S. Tsirkin     if (d->phys_map.skip) {
311efee678dSMarc-André Lureau         phys_page_compact(&d->phys_map, d->map.nodes);
312b35ba30fSMichael S. Tsirkin     }
313b35ba30fSMichael S. Tsirkin }
314b35ba30fSMichael S. Tsirkin 
31529cb533dSFam Zheng static inline bool section_covers_addr(const MemoryRegionSection *section,
31629cb533dSFam Zheng                                        hwaddr addr)
31729cb533dSFam Zheng {
31829cb533dSFam Zheng     /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
31929cb533dSFam Zheng      * the section must cover the entire address space.
32029cb533dSFam Zheng      */
32129cb533dSFam Zheng     return section->size.hi ||
32229cb533dSFam Zheng            range_covers_byte(section->offset_within_address_space,
32329cb533dSFam Zheng                              section->size.lo, addr);
32429cb533dSFam Zheng }
32529cb533dSFam Zheng 
32697115a8dSMichael S. Tsirkin static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
3279affd6fcSPaolo Bonzini                                            Node *nodes, MemoryRegionSection *sections)
32892e873b9Sbellard {
32931ab2b4aSAvi Kivity     PhysPageEntry *p;
33097115a8dSMichael S. Tsirkin     hwaddr index = addr >> TARGET_PAGE_BITS;
33131ab2b4aSAvi Kivity     int i;
332f1f6e3b8SAvi Kivity 
3339736e55bSMichael S. Tsirkin     for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
334c19e8800SAvi Kivity         if (lp.ptr == PHYS_MAP_NODE_NIL) {
3359affd6fcSPaolo Bonzini             return &sections[PHYS_SECTION_UNASSIGNED];
336f1f6e3b8SAvi Kivity         }
3379affd6fcSPaolo Bonzini         p = nodes[lp.ptr];
33803f49957SPaolo Bonzini         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
33931ab2b4aSAvi Kivity     }
340b35ba30fSMichael S. Tsirkin 
34129cb533dSFam Zheng     if (section_covers_addr(&sections[lp.ptr], addr)) {
3429affd6fcSPaolo Bonzini         return &sections[lp.ptr];
343b35ba30fSMichael S. Tsirkin     } else {
344b35ba30fSMichael S. Tsirkin         return &sections[PHYS_SECTION_UNASSIGNED];
345b35ba30fSMichael S. Tsirkin     }
346f3705d53SAvi Kivity }
347f3705d53SAvi Kivity 
348e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
349e5548617SBlue Swirl {
3502a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
351e5548617SBlue Swirl         && mr != &io_mem_watch;
352e5548617SBlue Swirl }
353149f54b5SPaolo Bonzini 
35479e2b9aeSPaolo Bonzini /* Called from RCU critical section */
355c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
35690260c6cSJan Kiszka                                                         hwaddr addr,
35790260c6cSJan Kiszka                                                         bool resolve_subpage)
3589f029603SJan Kiszka {
359729633c2SFam Zheng     MemoryRegionSection *section = atomic_read(&d->mru_section);
36090260c6cSJan Kiszka     subpage_t *subpage;
361729633c2SFam Zheng     bool update;
36290260c6cSJan Kiszka 
363729633c2SFam Zheng     if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
364729633c2SFam Zheng         section_covers_addr(section, addr)) {
365729633c2SFam Zheng         update = false;
366729633c2SFam Zheng     } else {
367729633c2SFam Zheng         section = phys_page_find(d->phys_map, addr, d->map.nodes,
368729633c2SFam Zheng                                  d->map.sections);
369729633c2SFam Zheng         update = true;
370729633c2SFam Zheng     }
37190260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
37290260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
37353cb28cbSMarcel Apfelbaum         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
37490260c6cSJan Kiszka     }
375729633c2SFam Zheng     if (update) {
376729633c2SFam Zheng         atomic_set(&d->mru_section, section);
377729633c2SFam Zheng     }
37890260c6cSJan Kiszka     return section;
3799f029603SJan Kiszka }
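
/* Note that mru_section acts as a single-entry cache: it is read with
 * atomic_read() because lookups run inside RCU critical sections
 * concurrently with writers, and a miss falls back to the full
 * phys_page_find() walk before refreshing the cache with atomic_set().
 */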
3809f029603SJan Kiszka 
38179e2b9aeSPaolo Bonzini /* Called from RCU critical section */
38290260c6cSJan Kiszka static MemoryRegionSection *
383c7086b4aSPaolo Bonzini address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
38490260c6cSJan Kiszka                                  hwaddr *plen, bool resolve_subpage)
385149f54b5SPaolo Bonzini {
386149f54b5SPaolo Bonzini     MemoryRegionSection *section;
387965eb2fcSPaolo Bonzini     MemoryRegion *mr;
388a87f3954SPaolo Bonzini     Int128 diff;
389149f54b5SPaolo Bonzini 
390c7086b4aSPaolo Bonzini     section = address_space_lookup_region(d, addr, resolve_subpage);
391149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegionSection */
392149f54b5SPaolo Bonzini     addr -= section->offset_within_address_space;
393149f54b5SPaolo Bonzini 
394149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegion */
395149f54b5SPaolo Bonzini     *xlat = addr + section->offset_within_region;
396149f54b5SPaolo Bonzini 
397965eb2fcSPaolo Bonzini     mr = section->mr;
398b242e0e0SPaolo Bonzini 
399b242e0e0SPaolo Bonzini     /* MMIO registers can be expected to perform full-width accesses based only
400b242e0e0SPaolo Bonzini      * on their address, without considering adjacent registers that could
401b242e0e0SPaolo Bonzini      * decode to completely different MemoryRegions.  When such registers
402b242e0e0SPaolo Bonzini      * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
403b242e0e0SPaolo Bonzini      * regions overlap wildly.  For this reason we cannot clamp the accesses
404b242e0e0SPaolo Bonzini      * here.
405b242e0e0SPaolo Bonzini      *
406b242e0e0SPaolo Bonzini      * If the length is small (as is the case for address_space_ldl/stl),
407b242e0e0SPaolo Bonzini      * everything works fine.  If the incoming length is large, however,
408b242e0e0SPaolo Bonzini      * the caller really has to do the clamping through memory_access_size.
409b242e0e0SPaolo Bonzini      */
410965eb2fcSPaolo Bonzini     if (memory_region_is_ram(mr)) {
411e4a511f8SPaolo Bonzini         diff = int128_sub(section->size, int128_make64(addr));
4123752a036SPeter Maydell         *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
413965eb2fcSPaolo Bonzini     }
414149f54b5SPaolo Bonzini     return section;
415149f54b5SPaolo Bonzini }
41690260c6cSJan Kiszka 
41741063e1eSPaolo Bonzini /* Called from RCU critical section */
4185c8a00ceSPaolo Bonzini MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
41990260c6cSJan Kiszka                                       hwaddr *xlat, hwaddr *plen,
42090260c6cSJan Kiszka                                       bool is_write)
42190260c6cSJan Kiszka {
42230951157SAvi Kivity     IOMMUTLBEntry iotlb;
42330951157SAvi Kivity     MemoryRegionSection *section;
42430951157SAvi Kivity     MemoryRegion *mr;
42530951157SAvi Kivity 
42630951157SAvi Kivity     for (;;) {
42779e2b9aeSPaolo Bonzini         AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
42879e2b9aeSPaolo Bonzini         section = address_space_translate_internal(d, addr, &addr, plen, true);
42930951157SAvi Kivity         mr = section->mr;
43030951157SAvi Kivity 
43130951157SAvi Kivity         if (!mr->iommu_ops) {
43230951157SAvi Kivity             break;
43330951157SAvi Kivity         }
43430951157SAvi Kivity 
4358d7b8cb9SLe Tan         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
43630951157SAvi Kivity         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
43730951157SAvi Kivity                 | (addr & iotlb.addr_mask));
43823820dbfSPeter Crosthwaite         *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
43930951157SAvi Kivity         if (!(iotlb.perm & (1 << is_write))) {
44030951157SAvi Kivity             mr = &io_mem_unassigned;
44130951157SAvi Kivity             break;
44230951157SAvi Kivity         }
44330951157SAvi Kivity 
44430951157SAvi Kivity         as = iotlb.target_as;
44530951157SAvi Kivity     }
44630951157SAvi Kivity 
447fe680d0dSAlexey Kardashevskiy     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
448a87f3954SPaolo Bonzini         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
44923820dbfSPeter Crosthwaite         *plen = MIN(page, *plen);
450a87f3954SPaolo Bonzini     }
451a87f3954SPaolo Bonzini 
45230951157SAvi Kivity     *xlat = addr;
45330951157SAvi Kivity     return mr;
45490260c6cSJan Kiszka }
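
/* Sketch of the loop above for a DMA that crosses one IOMMU: the first
 * iteration resolves the IOMMU MemoryRegion, its ->translate() callback
 * returns an IOMMUTLBEntry naming the target AddressSpace plus the
 * translated address and permissions, and the second iteration resolves
 * the translated address in that address space.  A failed permission
 * check short-circuits to io_mem_unassigned.
 */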
45590260c6cSJan Kiszka 
45679e2b9aeSPaolo Bonzini /* Called from RCU critical section */
45790260c6cSJan Kiszka MemoryRegionSection *
458d7898cdaSPeter Maydell address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
4599d82b5a7SPaolo Bonzini                                   hwaddr *xlat, hwaddr *plen)
46090260c6cSJan Kiszka {
46130951157SAvi Kivity     MemoryRegionSection *section;
462d7898cdaSPeter Maydell     AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
463d7898cdaSPeter Maydell 
464d7898cdaSPeter Maydell     section = address_space_translate_internal(d, addr, xlat, plen, false);
46530951157SAvi Kivity 
46630951157SAvi Kivity     assert(!section->mr->iommu_ops);
46730951157SAvi Kivity     return section;
46890260c6cSJan Kiszka }
4699fa3e853Sbellard #endif
470fd6ce8f6Sbellard 
471b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4729656f324Spbrook 
473e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
474e7f4eff7SJuan Quintela {
475259186a7SAndreas Färber     CPUState *cpu = opaque;
476e7f4eff7SJuan Quintela 
4773098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
4783098dba0Saurel32        version_id is increased. */
479259186a7SAndreas Färber     cpu->interrupt_request &= ~0x01;
480c01a71c1SChristian Borntraeger     tlb_flush(cpu, 1);
4819656f324Spbrook 
4829656f324Spbrook     return 0;
4839656f324Spbrook }
484e7f4eff7SJuan Quintela 
4856c3bff0eSPavel Dovgaluk static int cpu_common_pre_load(void *opaque)
4866c3bff0eSPavel Dovgaluk {
4876c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4886c3bff0eSPavel Dovgaluk 
489adee6424SPaolo Bonzini     cpu->exception_index = -1;
4906c3bff0eSPavel Dovgaluk 
4916c3bff0eSPavel Dovgaluk     return 0;
4926c3bff0eSPavel Dovgaluk }
4936c3bff0eSPavel Dovgaluk 
4946c3bff0eSPavel Dovgaluk static bool cpu_common_exception_index_needed(void *opaque)
4956c3bff0eSPavel Dovgaluk {
4966c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4976c3bff0eSPavel Dovgaluk 
498adee6424SPaolo Bonzini     return tcg_enabled() && cpu->exception_index != -1;
4996c3bff0eSPavel Dovgaluk }
5006c3bff0eSPavel Dovgaluk 
5016c3bff0eSPavel Dovgaluk static const VMStateDescription vmstate_cpu_common_exception_index = {
5026c3bff0eSPavel Dovgaluk     .name = "cpu_common/exception_index",
5036c3bff0eSPavel Dovgaluk     .version_id = 1,
5046c3bff0eSPavel Dovgaluk     .minimum_version_id = 1,
5055cd8cadaSJuan Quintela     .needed = cpu_common_exception_index_needed,
5066c3bff0eSPavel Dovgaluk     .fields = (VMStateField[]) {
5076c3bff0eSPavel Dovgaluk         VMSTATE_INT32(exception_index, CPUState),
5086c3bff0eSPavel Dovgaluk         VMSTATE_END_OF_LIST()
5096c3bff0eSPavel Dovgaluk     }
5106c3bff0eSPavel Dovgaluk };
5116c3bff0eSPavel Dovgaluk 
512bac05aa9SAndrey Smetanin static bool cpu_common_crash_occurred_needed(void *opaque)
513bac05aa9SAndrey Smetanin {
514bac05aa9SAndrey Smetanin     CPUState *cpu = opaque;
515bac05aa9SAndrey Smetanin 
516bac05aa9SAndrey Smetanin     return cpu->crash_occurred;
517bac05aa9SAndrey Smetanin }
518bac05aa9SAndrey Smetanin 
519bac05aa9SAndrey Smetanin static const VMStateDescription vmstate_cpu_common_crash_occurred = {
520bac05aa9SAndrey Smetanin     .name = "cpu_common/crash_occurred",
521bac05aa9SAndrey Smetanin     .version_id = 1,
522bac05aa9SAndrey Smetanin     .minimum_version_id = 1,
523bac05aa9SAndrey Smetanin     .needed = cpu_common_crash_occurred_needed,
524bac05aa9SAndrey Smetanin     .fields = (VMStateField[]) {
525bac05aa9SAndrey Smetanin         VMSTATE_BOOL(crash_occurred, CPUState),
526bac05aa9SAndrey Smetanin         VMSTATE_END_OF_LIST()
527bac05aa9SAndrey Smetanin     }
528bac05aa9SAndrey Smetanin };
529bac05aa9SAndrey Smetanin 
5301a1562f5SAndreas Färber const VMStateDescription vmstate_cpu_common = {
531e7f4eff7SJuan Quintela     .name = "cpu_common",
532e7f4eff7SJuan Quintela     .version_id = 1,
533e7f4eff7SJuan Quintela     .minimum_version_id = 1,
5346c3bff0eSPavel Dovgaluk     .pre_load = cpu_common_pre_load,
535e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
536e7f4eff7SJuan Quintela     .fields = (VMStateField[]) {
537259186a7SAndreas Färber         VMSTATE_UINT32(halted, CPUState),
538259186a7SAndreas Färber         VMSTATE_UINT32(interrupt_request, CPUState),
539e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
5406c3bff0eSPavel Dovgaluk     },
5415cd8cadaSJuan Quintela     .subsections = (const VMStateDescription*[]) {
5425cd8cadaSJuan Quintela         &vmstate_cpu_common_exception_index,
543bac05aa9SAndrey Smetanin         &vmstate_cpu_common_crash_occurred,
5445cd8cadaSJuan Quintela         NULL
545e7f4eff7SJuan Quintela     }
546e7f4eff7SJuan Quintela };
5471a1562f5SAndreas Färber 
5489656f324Spbrook #endif
5499656f324Spbrook 
55038d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
551950f1472SGlauber Costa {
552bdc44640SAndreas Färber     CPUState *cpu;
553950f1472SGlauber Costa 
554bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
55555e5c285SAndreas Färber         if (cpu->cpu_index == index) {
556bdc44640SAndreas Färber             return cpu;
55755e5c285SAndreas Färber         }
558950f1472SGlauber Costa     }
559950f1472SGlauber Costa 
560bdc44640SAndreas Färber     return NULL;
561950f1472SGlauber Costa }
562950f1472SGlauber Costa 
56309daed84SEdgar E. Iglesias #if !defined(CONFIG_USER_ONLY)
56456943e8cSPeter Maydell void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
56509daed84SEdgar E. Iglesias {
56612ebc9a7SPeter Maydell     CPUAddressSpace *newas;
56712ebc9a7SPeter Maydell 
56812ebc9a7SPeter Maydell     /* Target code should have set num_ases before calling us */
56912ebc9a7SPeter Maydell     assert(asidx < cpu->num_ases);
57012ebc9a7SPeter Maydell 
57156943e8cSPeter Maydell     if (asidx == 0) {
57256943e8cSPeter Maydell         /* address space 0 gets the convenience alias */
57356943e8cSPeter Maydell         cpu->as = as;
57456943e8cSPeter Maydell     }
57556943e8cSPeter Maydell 
57612ebc9a7SPeter Maydell     /* KVM cannot currently support multiple address spaces. */
57712ebc9a7SPeter Maydell     assert(asidx == 0 || !kvm_enabled());
57809daed84SEdgar E. Iglesias 
57912ebc9a7SPeter Maydell     if (!cpu->cpu_ases) {
58012ebc9a7SPeter Maydell         cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
58109daed84SEdgar E. Iglesias     }
58232857f4dSPeter Maydell 
58312ebc9a7SPeter Maydell     newas = &cpu->cpu_ases[asidx];
58412ebc9a7SPeter Maydell     newas->cpu = cpu;
58512ebc9a7SPeter Maydell     newas->as = as;
58656943e8cSPeter Maydell     if (tcg_enabled()) {
58712ebc9a7SPeter Maydell         newas->tcg_as_listener.commit = tcg_commit;
58812ebc9a7SPeter Maydell         memory_listener_register(&newas->tcg_as_listener, as);
58909daed84SEdgar E. Iglesias     }
59056943e8cSPeter Maydell }
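
/* Typical usage, roughly (the address space "as" here stands for one the
 * target created earlier):
 *
 *     cpu->num_ases = 1;
 *     cpu_address_space_init(cpu, as, 0);
 *
 * Index 0 additionally becomes the cpu->as convenience alias.
 */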
591651a5bc0SPeter Maydell 
592651a5bc0SPeter Maydell AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
593651a5bc0SPeter Maydell {
594651a5bc0SPeter Maydell     /* Return the AddressSpace corresponding to the specified index */
595651a5bc0SPeter Maydell     return cpu->cpu_ases[asidx].as;
596651a5bc0SPeter Maydell }
59709daed84SEdgar E. Iglesias #endif
59809daed84SEdgar E. Iglesias 
5997bbc124eSLaurent Vivier void cpu_exec_unrealizefn(CPUState *cpu)
6001c59eb39SBharata B Rao {
6019dfeca7cSBharata B Rao     CPUClass *cc = CPU_GET_CLASS(cpu);
6029dfeca7cSBharata B Rao 
603267f685bSPaolo Bonzini     cpu_list_remove(cpu);
6049dfeca7cSBharata B Rao 
6059dfeca7cSBharata B Rao     if (cc->vmsd != NULL) {
6069dfeca7cSBharata B Rao         vmstate_unregister(NULL, cc->vmsd, cpu);
6079dfeca7cSBharata B Rao     }
6089dfeca7cSBharata B Rao     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
6099dfeca7cSBharata B Rao         vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
6109dfeca7cSBharata B Rao     }
6111c59eb39SBharata B Rao }
6121c59eb39SBharata B Rao 
61339e329e3SLaurent Vivier void cpu_exec_initfn(CPUState *cpu)
614fd6ce8f6Sbellard {
61556943e8cSPeter Maydell     cpu->as = NULL;
61612ebc9a7SPeter Maydell     cpu->num_ases = 0;
61756943e8cSPeter Maydell 
618291135b5SEduardo Habkost #ifndef CONFIG_USER_ONLY
619291135b5SEduardo Habkost     cpu->thread_id = qemu_get_thread_id();
6206731d864SPeter Crosthwaite 
6216731d864SPeter Crosthwaite     /* This is a softmmu CPU object, so create a property for it
6226731d864SPeter Crosthwaite      * so users can wire up its memory. (This can't go in qom/cpu.c
6236731d864SPeter Crosthwaite      * because that file is compiled only once for both user-mode
6246731d864SPeter Crosthwaite      * and system builds.) The default if no link is set up is to use
6256731d864SPeter Crosthwaite      * the system address space.
6266731d864SPeter Crosthwaite      */
6276731d864SPeter Crosthwaite     object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
6286731d864SPeter Crosthwaite                              (Object **)&cpu->memory,
6296731d864SPeter Crosthwaite                              qdev_prop_allow_set_link_before_realize,
6306731d864SPeter Crosthwaite                              OBJ_PROP_LINK_UNREF_ON_RELEASE,
6316731d864SPeter Crosthwaite                              &error_abort);
6326731d864SPeter Crosthwaite     cpu->memory = system_memory;
6336731d864SPeter Crosthwaite     object_ref(OBJECT(cpu->memory));
634291135b5SEduardo Habkost #endif
63539e329e3SLaurent Vivier }
63639e329e3SLaurent Vivier 
637ce5b1bbfSLaurent Vivier void cpu_exec_realizefn(CPUState *cpu, Error **errp)
63839e329e3SLaurent Vivier {
63939e329e3SLaurent Vivier     CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
640291135b5SEduardo Habkost 
641267f685bSPaolo Bonzini     cpu_list_add(cpu);
6421bc7e522SIgor Mammedov 
6431bc7e522SIgor Mammedov #ifndef CONFIG_USER_ONLY
644e0d47944SAndreas Färber     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
645741da0d3SPaolo Bonzini         vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
646e0d47944SAndreas Färber     }
647b170fce3SAndreas Färber     if (cc->vmsd != NULL) {
648741da0d3SPaolo Bonzini         vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
649b170fce3SAndreas Färber     }
650741da0d3SPaolo Bonzini #endif
651fd6ce8f6Sbellard }
652fd6ce8f6Sbellard 
65394df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
65400b941e5SAndreas Färber static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
65594df27fdSPaul Brook {
65694df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
65794df27fdSPaul Brook }
65894df27fdSPaul Brook #else
65900b941e5SAndreas Färber static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
6601e7855a5SMax Filippov {
6615232e4c7SPeter Maydell     MemTxAttrs attrs;
6625232e4c7SPeter Maydell     hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
6635232e4c7SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
664e8262a1bSMax Filippov     if (phys != -1) {
6655232e4c7SPeter Maydell         tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
66629d8ec7bSEdgar E. Iglesias                                 phys | (pc & ~TARGET_PAGE_MASK));
667e8262a1bSMax Filippov     }
6681e7855a5SMax Filippov }
669c27004ecSbellard #endif
670d720b93dSbellard 
671c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
67275a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
673c527ee8fSPaul Brook {
675c527ee8fSPaul Brook }
676c527ee8fSPaul Brook 
6773ee887e8SPeter Maydell int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
6783ee887e8SPeter Maydell                           int flags)
6793ee887e8SPeter Maydell {
6803ee887e8SPeter Maydell     return -ENOSYS;
6813ee887e8SPeter Maydell }
6823ee887e8SPeter Maydell 
6833ee887e8SPeter Maydell void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
6843ee887e8SPeter Maydell {
6853ee887e8SPeter Maydell }
6863ee887e8SPeter Maydell 
68775a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
688c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
689c527ee8fSPaul Brook {
690c527ee8fSPaul Brook     return -ENOSYS;
691c527ee8fSPaul Brook }
692c527ee8fSPaul Brook #else
6936658ffb8Spbrook /* Add a watchpoint.  */
69475a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
695a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
6966658ffb8Spbrook {
697c0ce998eSaliguori     CPUWatchpoint *wp;
6986658ffb8Spbrook 
69905068c0dSPeter Maydell     /* forbid ranges which are empty or run off the end of the address space */
70007e2863dSMax Filippov     if (len == 0 || (addr + len - 1) < addr) {
70175a34036SAndreas Färber         error_report("tried to set invalid watchpoint at %"
70275a34036SAndreas Färber                      VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
703b4051334Saliguori         return -EINVAL;
704b4051334Saliguori     }
7057267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
7066658ffb8Spbrook 
707a1d1bb31Saliguori     wp->vaddr = addr;
70805068c0dSPeter Maydell     wp->len = len;
709a1d1bb31Saliguori     wp->flags = flags;
710a1d1bb31Saliguori 
7112dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
712ff4700b0SAndreas Färber     if (flags & BP_GDB) {
713ff4700b0SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
714ff4700b0SAndreas Färber     } else {
715ff4700b0SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
716ff4700b0SAndreas Färber     }
717a1d1bb31Saliguori 
71831b030d4SAndreas Färber     tlb_flush_page(cpu, addr);
719a1d1bb31Saliguori 
720a1d1bb31Saliguori     if (watchpoint)
721a1d1bb31Saliguori         *watchpoint = wp;
722a1d1bb31Saliguori     return 0;
7236658ffb8Spbrook }
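
/* Usage sketch: a debug front end that wants to trap 4-byte writes at
 * addr would call something like
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);
 *
 * where the BP_* flags select GDB ownership and the access type.
 */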
7246658ffb8Spbrook 
725a1d1bb31Saliguori /* Remove a specific watchpoint.  */
72675a34036SAndreas Färber int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
727a1d1bb31Saliguori                           int flags)
7286658ffb8Spbrook {
729a1d1bb31Saliguori     CPUWatchpoint *wp;
7306658ffb8Spbrook 
731ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
73205068c0dSPeter Maydell         if (addr == wp->vaddr && len == wp->len
7336e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
73475a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
7356658ffb8Spbrook             return 0;
7366658ffb8Spbrook         }
7376658ffb8Spbrook     }
738a1d1bb31Saliguori     return -ENOENT;
7396658ffb8Spbrook }
7406658ffb8Spbrook 
741a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
74275a34036SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
743a1d1bb31Saliguori {
744ff4700b0SAndreas Färber     QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7457d03f82fSedgar_igl 
74631b030d4SAndreas Färber     tlb_flush_page(cpu, watchpoint->vaddr);
747a1d1bb31Saliguori 
7487267c094SAnthony Liguori     g_free(watchpoint);
7497d03f82fSedgar_igl }
7507d03f82fSedgar_igl 
751a1d1bb31Saliguori /* Remove all matching watchpoints.  */
75275a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
753a1d1bb31Saliguori {
754c0ce998eSaliguori     CPUWatchpoint *wp, *next;
755a1d1bb31Saliguori 
756ff4700b0SAndreas Färber     QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75775a34036SAndreas Färber         if (wp->flags & mask) {
75875a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
75975a34036SAndreas Färber         }
760a1d1bb31Saliguori     }
761c0ce998eSaliguori }
76205068c0dSPeter Maydell 
76305068c0dSPeter Maydell /* Return true if this watchpoint address matches the specified
76405068c0dSPeter Maydell  * access (i.e. the address range covered by the watchpoint overlaps
76505068c0dSPeter Maydell  * partially or completely with the address range covered by the
76605068c0dSPeter Maydell  * access).
76705068c0dSPeter Maydell  */
76805068c0dSPeter Maydell static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
76905068c0dSPeter Maydell                                                   vaddr addr,
77005068c0dSPeter Maydell                                                   vaddr len)
77105068c0dSPeter Maydell {
77205068c0dSPeter Maydell     /* We know the lengths are non-zero, but a little caution is
77305068c0dSPeter Maydell      * required to avoid errors in the case where the range ends
77405068c0dSPeter Maydell      * exactly at the top of the address space and so addr + len
77505068c0dSPeter Maydell      * wraps round to zero.
77605068c0dSPeter Maydell      */
77705068c0dSPeter Maydell     vaddr wpend = wp->vaddr + wp->len - 1;
77805068c0dSPeter Maydell     vaddr addrend = addr + len - 1;
77905068c0dSPeter Maydell 
78005068c0dSPeter Maydell     return !(addr > wpend || wp->vaddr > addrend);
78105068c0dSPeter Maydell }
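
/* Worked example: an 8-byte watchpoint at the very top of the address
 * space, wp->vaddr == ~(vaddr)7 and wp->len == 8, yields
 * wpend == ~(vaddr)0 without wrapping, so a 4-byte access at ~(vaddr)3
 * still matches even though wp->vaddr + wp->len would overflow to zero.
 */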
78205068c0dSPeter Maydell 
783c527ee8fSPaul Brook #endif
784a1d1bb31Saliguori 
785a1d1bb31Saliguori /* Add a breakpoint.  */
786b3310ab3SAndreas Färber int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
787a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
7884c3a88a2Sbellard {
789c0ce998eSaliguori     CPUBreakpoint *bp;
7904c3a88a2Sbellard 
7917267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
7924c3a88a2Sbellard 
793a1d1bb31Saliguori     bp->pc = pc;
794a1d1bb31Saliguori     bp->flags = flags;
795a1d1bb31Saliguori 
7962dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
79700b941e5SAndreas Färber     if (flags & BP_GDB) {
798f0c3c505SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
79900b941e5SAndreas Färber     } else {
800f0c3c505SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
80100b941e5SAndreas Färber     }
802d720b93dSbellard 
803f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, pc);
804a1d1bb31Saliguori 
80500b941e5SAndreas Färber     if (breakpoint) {
806a1d1bb31Saliguori         *breakpoint = bp;
80700b941e5SAndreas Färber     }
8084c3a88a2Sbellard     return 0;
8094c3a88a2Sbellard }
8104c3a88a2Sbellard 
811a1d1bb31Saliguori /* Remove a specific breakpoint.  */
812b3310ab3SAndreas Färber int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
813a1d1bb31Saliguori {
814a1d1bb31Saliguori     CPUBreakpoint *bp;
815a1d1bb31Saliguori 
816f0c3c505SAndreas Färber     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
817a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
818b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
819a1d1bb31Saliguori             return 0;
8207d03f82fSedgar_igl         }
821a1d1bb31Saliguori     }
822a1d1bb31Saliguori     return -ENOENT;
8237d03f82fSedgar_igl }
8247d03f82fSedgar_igl 
825a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
826b3310ab3SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
8274c3a88a2Sbellard {
828f0c3c505SAndreas Färber     QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
829f0c3c505SAndreas Färber 
830f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, breakpoint->pc);
831a1d1bb31Saliguori 
8327267c094SAnthony Liguori     g_free(breakpoint);
833a1d1bb31Saliguori }
834a1d1bb31Saliguori 
835a1d1bb31Saliguori /* Remove all matching breakpoints. */
836b3310ab3SAndreas Färber void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
837a1d1bb31Saliguori {
838c0ce998eSaliguori     CPUBreakpoint *bp, *next;
839a1d1bb31Saliguori 
840f0c3c505SAndreas Färber     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
841b3310ab3SAndreas Färber         if (bp->flags & mask) {
842b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
843b3310ab3SAndreas Färber         }
844c0ce998eSaliguori     }
8454c3a88a2Sbellard }
8464c3a88a2Sbellard 
847c33a346eSbellard /* Enable or disable single-step mode. EXCP_DEBUG is returned by the
848c33a346eSbellard    CPU loop after each instruction */
8493825b28fSAndreas Färber void cpu_single_step(CPUState *cpu, int enabled)
850c33a346eSbellard {
851ed2803daSAndreas Färber     if (cpu->singlestep_enabled != enabled) {
852ed2803daSAndreas Färber         cpu->singlestep_enabled = enabled;
853ed2803daSAndreas Färber         if (kvm_enabled()) {
85438e478ecSStefan Weil             kvm_update_guest_debug(cpu, 0);
855ed2803daSAndreas Färber         } else {
856ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
8579fa3e853Sbellard             /* XXX: only flush what is necessary */
858bbd77c18SPeter Crosthwaite             tb_flush(cpu);
859c33a346eSbellard         }
860e22a25c9Saliguori     }
861c33a346eSbellard }
862c33a346eSbellard 
863a47dddd7SAndreas Färber void cpu_abort(CPUState *cpu, const char *fmt, ...)
8647501267eSbellard {
8657501267eSbellard     va_list ap;
866493ae1f0Spbrook     va_list ap2;
8677501267eSbellard 
8687501267eSbellard     va_start(ap, fmt);
869493ae1f0Spbrook     va_copy(ap2, ap);
8707501267eSbellard     fprintf(stderr, "qemu: fatal: ");
8717501267eSbellard     vfprintf(stderr, fmt, ap);
8727501267eSbellard     fprintf(stderr, "\n");
873878096eeSAndreas Färber     cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
874013a2942SPaolo Bonzini     if (qemu_log_separate()) {
87593fcfe39Saliguori         qemu_log("qemu: fatal: ");
87693fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
87793fcfe39Saliguori         qemu_log("\n");
878a0762859SAndreas Färber         log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
87931b1a7b4Saliguori         qemu_log_flush();
88093fcfe39Saliguori         qemu_log_close();
881924edcaeSbalrog     }
882493ae1f0Spbrook     va_end(ap2);
883f9373291Sj_mayer     va_end(ap);
8847615936eSPavel Dovgalyuk     replay_finish();
885fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
886fd052bf6SRiku Voipio     {
887fd052bf6SRiku Voipio         struct sigaction act;
888fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
889fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
890fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
891fd052bf6SRiku Voipio     }
892fd052bf6SRiku Voipio #endif
8937501267eSbellard     abort();
8947501267eSbellard }
8957501267eSbellard 
8960124311eSbellard #if !defined(CONFIG_USER_ONLY)
8970dc3f44aSMike Day /* Called from RCU critical section */
898041603feSPaolo Bonzini static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
899041603feSPaolo Bonzini {
900041603feSPaolo Bonzini     RAMBlock *block;
901041603feSPaolo Bonzini 
90243771539SPaolo Bonzini     block = atomic_rcu_read(&ram_list.mru_block);
9039b8424d5SMichael S. Tsirkin     if (block && addr - block->offset < block->max_length) {
90468851b98SPaolo Bonzini         return block;
905041603feSPaolo Bonzini     }
9060dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9079b8424d5SMichael S. Tsirkin         if (addr - block->offset < block->max_length) {
908041603feSPaolo Bonzini             goto found;
909041603feSPaolo Bonzini         }
910041603feSPaolo Bonzini     }
911041603feSPaolo Bonzini 
912041603feSPaolo Bonzini     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
913041603feSPaolo Bonzini     abort();
914041603feSPaolo Bonzini 
915041603feSPaolo Bonzini found:
91643771539SPaolo Bonzini     /* It is safe to write mru_block outside the iothread lock.  This
91743771539SPaolo Bonzini      * is what happens:
91843771539SPaolo Bonzini      *
91943771539SPaolo Bonzini      *     mru_block = xxx
92043771539SPaolo Bonzini      *     rcu_read_unlock()
92143771539SPaolo Bonzini      *                                        xxx removed from list
92243771539SPaolo Bonzini      *                  rcu_read_lock()
92343771539SPaolo Bonzini      *                  read mru_block
92443771539SPaolo Bonzini      *                                        mru_block = NULL;
92543771539SPaolo Bonzini      *                                        call_rcu(reclaim_ramblock, xxx);
92643771539SPaolo Bonzini      *                  rcu_read_unlock()
92743771539SPaolo Bonzini      *
92843771539SPaolo Bonzini      * atomic_rcu_set is not needed here.  The block was already published
92943771539SPaolo Bonzini      * when it was placed into the list.  Here we're just making an extra
93043771539SPaolo Bonzini      * copy of the pointer.
93143771539SPaolo Bonzini      */
932041603feSPaolo Bonzini     ram_list.mru_block = block;
933041603feSPaolo Bonzini     return block;
934041603feSPaolo Bonzini }
935041603feSPaolo Bonzini 
936a2f4d5beSJuan Quintela static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
9371ccde1cbSbellard {
9389a13565dSPeter Crosthwaite     CPUState *cpu;
939041603feSPaolo Bonzini     ram_addr_t start1;
940a2f4d5beSJuan Quintela     RAMBlock *block;
941a2f4d5beSJuan Quintela     ram_addr_t end;
942a2f4d5beSJuan Quintela 
943a2f4d5beSJuan Quintela     end = TARGET_PAGE_ALIGN(start + length);
944a2f4d5beSJuan Quintela     start &= TARGET_PAGE_MASK;
945f23db169Sbellard 
9460dc3f44aSMike Day     rcu_read_lock();
947041603feSPaolo Bonzini     block = qemu_get_ram_block(start);
948041603feSPaolo Bonzini     assert(block == qemu_get_ram_block(end - 1));
9491240be24SMichael S. Tsirkin     start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
9509a13565dSPeter Crosthwaite     CPU_FOREACH(cpu) {
9519a13565dSPeter Crosthwaite         tlb_reset_dirty(cpu, start1, length);
9529a13565dSPeter Crosthwaite     }
9530dc3f44aSMike Day     rcu_read_unlock();
954d24981d3SJuan Quintela }
955d24981d3SJuan Quintela 
956d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
95703eebc9eSStefan Hajnoczi bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
95803eebc9eSStefan Hajnoczi                                               ram_addr_t length,
95952159192SJuan Quintela                                               unsigned client)
960d24981d3SJuan Quintela {
9615b82b703SStefan Hajnoczi     DirtyMemoryBlocks *blocks;
96203eebc9eSStefan Hajnoczi     unsigned long end, page;
9635b82b703SStefan Hajnoczi     bool dirty = false;
964d24981d3SJuan Quintela 
96503eebc9eSStefan Hajnoczi     if (length == 0) {
96603eebc9eSStefan Hajnoczi         return false;
96703eebc9eSStefan Hajnoczi     }
96803eebc9eSStefan Hajnoczi 
96903eebc9eSStefan Hajnoczi     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
97003eebc9eSStefan Hajnoczi     page = start >> TARGET_PAGE_BITS;
9715b82b703SStefan Hajnoczi 
9725b82b703SStefan Hajnoczi     rcu_read_lock();
9735b82b703SStefan Hajnoczi 
9745b82b703SStefan Hajnoczi     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
9755b82b703SStefan Hajnoczi 
9765b82b703SStefan Hajnoczi     while (page < end) {
9775b82b703SStefan Hajnoczi         unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
9785b82b703SStefan Hajnoczi         unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
9795b82b703SStefan Hajnoczi         unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
9805b82b703SStefan Hajnoczi 
9815b82b703SStefan Hajnoczi         dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
9825b82b703SStefan Hajnoczi                                               offset, num);
9835b82b703SStefan Hajnoczi         page += num;
9845b82b703SStefan Hajnoczi     }
9855b82b703SStefan Hajnoczi 
9865b82b703SStefan Hajnoczi     rcu_read_unlock();
98703eebc9eSStefan Hajnoczi 
98803eebc9eSStefan Hajnoczi     if (dirty && tcg_enabled()) {
989a2f4d5beSJuan Quintela         tlb_reset_dirty_range_all(start, length);
990d24981d3SJuan Quintela     }
99103eebc9eSStefan Hajnoczi 
99203eebc9eSStefan Hajnoczi     return dirty;
9931ccde1cbSbellard }
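
/* Example: a range that spans a DirtyMemoryBlocks boundary is handled by
 * the loop above in two bitmap_test_and_clear_atomic() calls, one for the
 * tail of the first block and one for the head of the next; "dirty" ORs
 * the results, so a single dirty page anywhere in the range is enough to
 * trigger the TLB reset.
 */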
9941ccde1cbSbellard 
99579e2b9aeSPaolo Bonzini /* Called from RCU critical section */
996bb0e627aSAndreas Färber hwaddr memory_region_section_get_iotlb(CPUState *cpu,
997e5548617SBlue Swirl                                        MemoryRegionSection *section,
998e5548617SBlue Swirl                                        target_ulong vaddr,
999149f54b5SPaolo Bonzini                                        hwaddr paddr, hwaddr xlat,
1000e5548617SBlue Swirl                                        int prot,
1001e5548617SBlue Swirl                                        target_ulong *address)
1002e5548617SBlue Swirl {
1003a8170e5eSAvi Kivity     hwaddr iotlb;
1004e5548617SBlue Swirl     CPUWatchpoint *wp;
1005e5548617SBlue Swirl 
1006cc5bea60SBlue Swirl     if (memory_region_is_ram(section->mr)) {
1007e5548617SBlue Swirl         /* Normal RAM.  */
1008e4e69794SPaolo Bonzini         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1009e5548617SBlue Swirl         if (!section->readonly) {
1010b41aac4fSLiu Ping Fan             iotlb |= PHYS_SECTION_NOTDIRTY;
1011e5548617SBlue Swirl         } else {
1012b41aac4fSLiu Ping Fan             iotlb |= PHYS_SECTION_ROM;
1013e5548617SBlue Swirl         }
1014e5548617SBlue Swirl     } else {
10150b8e2c10SPeter Maydell         AddressSpaceDispatch *d;
10160b8e2c10SPeter Maydell 
10170b8e2c10SPeter Maydell         d = atomic_rcu_read(&section->address_space->dispatch);
10180b8e2c10SPeter Maydell         iotlb = section - d->map.sections;
1019149f54b5SPaolo Bonzini         iotlb += xlat;
1020e5548617SBlue Swirl     }
1021e5548617SBlue Swirl 
1022e5548617SBlue Swirl     /* Make accesses to pages with watchpoints go via the
1023e5548617SBlue Swirl        watchpoint trap routines.  */
1024ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
102505068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1026e5548617SBlue Swirl             /* Avoid trapping reads of pages with a write breakpoint. */
1027e5548617SBlue Swirl             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1028b41aac4fSLiu Ping Fan                 iotlb = PHYS_SECTION_WATCH + paddr;
1029e5548617SBlue Swirl                 *address |= TLB_MMIO;
1030e5548617SBlue Swirl                 break;
1031e5548617SBlue Swirl             }
1032e5548617SBlue Swirl         }
1033e5548617SBlue Swirl     }
1034e5548617SBlue Swirl 
1035e5548617SBlue Swirl     return iotlb;
1036e5548617SBlue Swirl }
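
/* Note on the encoding above: for RAM the iotlb value is the page's
 * ram_addr_t ORed with a small phys-section index such as
 * PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM, while for MMIO it is the
 * section's index into d->map.sections plus the offset within the
 * region.  The assert in phys_section_add() keeps section numbers below
 * TARGET_PAGE_SIZE so the two components cannot collide.
 */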
10379fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
103833417e70Sbellard 
1039e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
10408da3ff18Spbrook 
1041c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
10425312bd8bSAvi Kivity                              uint16_t section);
1043acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
104454688b1eSAvi Kivity 
1045a2b257d6SIgor Mammedov static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1046a2b257d6SIgor Mammedov                                qemu_anon_ram_alloc;
104791138037SMarkus Armbruster 
104891138037SMarkus Armbruster /*
104991138037SMarkus Armbruster  * Set a custom physical guest memory allocator.
105091138037SMarkus Armbruster  * Accelerators with unusual needs may need this.  Hopefully, we can
105191138037SMarkus Armbruster  * get rid of it eventually.
105291138037SMarkus Armbruster  */
1053a2b257d6SIgor Mammedov void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
105491138037SMarkus Armbruster {
105591138037SMarkus Armbruster     phys_mem_alloc = alloc;
105691138037SMarkus Armbruster }
105791138037SMarkus Armbruster 
105853cb28cbSMarcel Apfelbaum static uint16_t phys_section_add(PhysPageMap *map,
105953cb28cbSMarcel Apfelbaum                                  MemoryRegionSection *section)
10605312bd8bSAvi Kivity {
106168f3f65bSPaolo Bonzini     /* The physical section number is ORed with a page-aligned
106268f3f65bSPaolo Bonzini      * pointer to produce the iotlb entries.  Thus it should
106368f3f65bSPaolo Bonzini      * never overflow into the page-aligned value.
106468f3f65bSPaolo Bonzini      */
106553cb28cbSMarcel Apfelbaum     assert(map->sections_nb < TARGET_PAGE_SIZE);
106668f3f65bSPaolo Bonzini 
106753cb28cbSMarcel Apfelbaum     if (map->sections_nb == map->sections_nb_alloc) {
106853cb28cbSMarcel Apfelbaum         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
106953cb28cbSMarcel Apfelbaum         map->sections = g_renew(MemoryRegionSection, map->sections,
107053cb28cbSMarcel Apfelbaum                                 map->sections_nb_alloc);
10715312bd8bSAvi Kivity     }
107253cb28cbSMarcel Apfelbaum     map->sections[map->sections_nb] = *section;
1073dfde4e6eSPaolo Bonzini     memory_region_ref(section->mr);
107453cb28cbSMarcel Apfelbaum     return map->sections_nb++;
10755312bd8bSAvi Kivity }
10765312bd8bSAvi Kivity 
1077058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
1078058bc4b5SPaolo Bonzini {
107955b4e80bSDon Slutz     bool have_sub_page = mr->subpage;
108055b4e80bSDon Slutz 
1081dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
1082dfde4e6eSPaolo Bonzini 
108355b4e80bSDon Slutz     if (have_sub_page) {
1084058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
1085b4fefef9SPeter Crosthwaite         object_unref(OBJECT(&subpage->iomem));
1086058bc4b5SPaolo Bonzini         g_free(subpage);
1087058bc4b5SPaolo Bonzini     }
1088058bc4b5SPaolo Bonzini }
1089058bc4b5SPaolo Bonzini 
10906092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
10915312bd8bSAvi Kivity {
10929affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
10939affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1094058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1095058bc4b5SPaolo Bonzini     }
10969affd6fcSPaolo Bonzini     g_free(map->sections);
10979affd6fcSPaolo Bonzini     g_free(map->nodes);
10985312bd8bSAvi Kivity }
10995312bd8bSAvi Kivity 
1100ac1970fbSAvi Kivity static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
11010f0cb164SAvi Kivity {
11020f0cb164SAvi Kivity     subpage_t *subpage;
1103a8170e5eSAvi Kivity     hwaddr base = section->offset_within_address_space
11040f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
110597115a8dSMichael S. Tsirkin     MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
110653cb28cbSMarcel Apfelbaum                                                    d->map.nodes, d->map.sections);
11070f0cb164SAvi Kivity     MemoryRegionSection subsection = {
11080f0cb164SAvi Kivity         .offset_within_address_space = base,
1109052e87b0SPaolo Bonzini         .size = int128_make64(TARGET_PAGE_SIZE),
11100f0cb164SAvi Kivity     };
1111a8170e5eSAvi Kivity     hwaddr start, end;
11120f0cb164SAvi Kivity 
1113f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
11140f0cb164SAvi Kivity 
1115f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
1116acc9d80bSJan Kiszka         subpage = subpage_init(d->as, base);
11173be91e86SEdgar E. Iglesias         subsection.address_space = d->as;
11180f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
1119ac1970fbSAvi Kivity         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
112053cb28cbSMarcel Apfelbaum                       phys_section_add(&d->map, &subsection));
11210f0cb164SAvi Kivity     } else {
1122f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
11230f0cb164SAvi Kivity     }
11240f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1125052e87b0SPaolo Bonzini     end = start + int128_get64(section->size) - 1;
112653cb28cbSMarcel Apfelbaum     subpage_register(subpage, start, end,
112753cb28cbSMarcel Apfelbaum                      phys_section_add(&d->map, section));
11280f0cb164SAvi Kivity }
11290f0cb164SAvi Kivity 
11300f0cb164SAvi Kivity 
1131052e87b0SPaolo Bonzini static void register_multipage(AddressSpaceDispatch *d,
1132052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
113333417e70Sbellard {
1134a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
113553cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
1136052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1137052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
1138dd81124bSAvi Kivity 
1139733d5ef5SPaolo Bonzini     assert(num_pages);
1140733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
114133417e70Sbellard }
114233417e70Sbellard 
1143ac1970fbSAvi Kivity static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
11440f0cb164SAvi Kivity {
114589ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
114600752703SPaolo Bonzini     AddressSpaceDispatch *d = as->next_dispatch;
114799b9cc06SPaolo Bonzini     MemoryRegionSection now = *section, remain = *section;
1148052e87b0SPaolo Bonzini     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
11490f0cb164SAvi Kivity 
1150733d5ef5SPaolo Bonzini     if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1151733d5ef5SPaolo Bonzini         uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1152733d5ef5SPaolo Bonzini                        - now.offset_within_address_space;
1153733d5ef5SPaolo Bonzini 
1154052e87b0SPaolo Bonzini         now.size = int128_min(int128_make64(left), now.size);
1155ac1970fbSAvi Kivity         register_subpage(d, &now);
1156733d5ef5SPaolo Bonzini     } else {
1157052e87b0SPaolo Bonzini         now.size = int128_zero();
1158733d5ef5SPaolo Bonzini     }
1159052e87b0SPaolo Bonzini     while (int128_ne(remain.size, now.size)) {
1160052e87b0SPaolo Bonzini         remain.size = int128_sub(remain.size, now.size);
1161052e87b0SPaolo Bonzini         remain.offset_within_address_space += int128_get64(now.size);
1162052e87b0SPaolo Bonzini         remain.offset_within_region += int128_get64(now.size);
11630f0cb164SAvi Kivity         now = remain;
1164052e87b0SPaolo Bonzini         if (int128_lt(remain.size, page_size)) {
1165733d5ef5SPaolo Bonzini             register_subpage(d, &now);
116688266249SHu Tao         } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1167052e87b0SPaolo Bonzini             now.size = page_size;
1168ac1970fbSAvi Kivity             register_subpage(d, &now);
116969b67646STyler Hall         } else {
1170052e87b0SPaolo Bonzini             now.size = int128_and(now.size, int128_neg(page_size));
1171ac1970fbSAvi Kivity             register_multipage(d, &now);
117269b67646STyler Hall         }
11730f0cb164SAvi Kivity     }
11740f0cb164SAvi Kivity }
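
/*
 * Worked example (illustrative): with 4 KiB target pages, a section
 * covering guest-physical [0x1800, 0x4400) is registered in three steps:
 *   1. unaligned head [0x1800, 0x2000) via register_subpage()
 *   2. full pages     [0x2000, 0x4000) via register_multipage()
 *   3. short tail     [0x4000, 0x4400) via register_subpage()
 */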
11750f0cb164SAvi Kivity 
117662a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
117762a2744cSSheng Yang {
117862a2744cSSheng Yang     if (kvm_enabled())
117962a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
118062a2744cSSheng Yang }
118162a2744cSSheng Yang 
1182b2a8658eSUmesh Deshpande void qemu_mutex_lock_ramlist(void)
1183b2a8658eSUmesh Deshpande {
1184b2a8658eSUmesh Deshpande     qemu_mutex_lock(&ram_list.mutex);
1185b2a8658eSUmesh Deshpande }
1186b2a8658eSUmesh Deshpande 
1187b2a8658eSUmesh Deshpande void qemu_mutex_unlock_ramlist(void)
1188b2a8658eSUmesh Deshpande {
1189b2a8658eSUmesh Deshpande     qemu_mutex_unlock(&ram_list.mutex);
1190b2a8658eSUmesh Deshpande }
1191b2a8658eSUmesh Deshpande 
1192e1e84ba0SMarkus Armbruster #ifdef __linux__
119304b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
119404b16653SAlex Williamson                             ram_addr_t memory,
11957f56e740SPaolo Bonzini                             const char *path,
11967f56e740SPaolo Bonzini                             Error **errp)
1197c902760fSMarcelo Tosatti {
1198fd97fd44SMarkus Armbruster     bool unlink_on_error = false;
1199c902760fSMarcelo Tosatti     char *filename;
12008ca761f6SPeter Feiner     char *sanitized_name;
12018ca761f6SPeter Feiner     char *c;
1202056b68afSIgor Mammedov     void *area = MAP_FAILED;
12035c3ece79SPaolo Bonzini     int fd = -1;
1204c902760fSMarcelo Tosatti 
1205c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
12067f56e740SPaolo Bonzini         error_setg(errp,
12077f56e740SPaolo Bonzini                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1208fd97fd44SMarkus Armbruster         return NULL;
1209c902760fSMarcelo Tosatti     }
1210c902760fSMarcelo Tosatti 
1211fd97fd44SMarkus Armbruster     for (;;) {
1212fd97fd44SMarkus Armbruster         fd = open(path, O_RDWR);
1213fd97fd44SMarkus Armbruster         if (fd >= 0) {
1214fd97fd44SMarkus Armbruster             /* @path names an existing file, use it */
1215fd97fd44SMarkus Armbruster             break;
1216fd97fd44SMarkus Armbruster         }
1217fd97fd44SMarkus Armbruster         if (errno == ENOENT) {
1218fd97fd44SMarkus Armbruster             /* @path names a file that doesn't exist, create it */
1219fd97fd44SMarkus Armbruster             fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1220fd97fd44SMarkus Armbruster             if (fd >= 0) {
1221fd97fd44SMarkus Armbruster                 unlink_on_error = true;
1222fd97fd44SMarkus Armbruster                 break;
1223fd97fd44SMarkus Armbruster             }
1224fd97fd44SMarkus Armbruster         } else if (errno == EISDIR) {
1225fd97fd44SMarkus Armbruster             /* @path names a directory, create a file there */
12268ca761f6SPeter Feiner             /* Make name safe to use with mkstemp by replacing '/' with '_'. */
122783234bf2SPeter Crosthwaite             sanitized_name = g_strdup(memory_region_name(block->mr));
12288ca761f6SPeter Feiner             for (c = sanitized_name; *c != '\0'; c++) {
12298d31d6b6SPavel Fedin                 if (*c == '/') {
12308ca761f6SPeter Feiner                     *c = '_';
12318ca761f6SPeter Feiner                 }
12328d31d6b6SPavel Fedin             }
12338ca761f6SPeter Feiner 
12348ca761f6SPeter Feiner             filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
12358ca761f6SPeter Feiner                                        sanitized_name);
12368ca761f6SPeter Feiner             g_free(sanitized_name);
1237c902760fSMarcelo Tosatti 
1238c902760fSMarcelo Tosatti             fd = mkstemp(filename);
12398d31d6b6SPavel Fedin             if (fd >= 0) {
12408d31d6b6SPavel Fedin                 unlink(filename);
1241fd97fd44SMarkus Armbruster                 g_free(filename);
1242fd97fd44SMarkus Armbruster                 break;
12438d31d6b6SPavel Fedin             }
12448d31d6b6SPavel Fedin             g_free(filename);
1245fd97fd44SMarkus Armbruster         }
1246fd97fd44SMarkus Armbruster         if (errno != EEXIST && errno != EINTR) {
1247fd97fd44SMarkus Armbruster             error_setg_errno(errp, errno,
1248fd97fd44SMarkus Armbruster                              "can't open backing store %s for guest RAM",
1249fd97fd44SMarkus Armbruster                              path);
1250fd97fd44SMarkus Armbruster             goto error;
1251fd97fd44SMarkus Armbruster         }
1252fd97fd44SMarkus Armbruster         /*
1253fd97fd44SMarkus Armbruster          * Try again on EINTR and EEXIST.  The latter happens when
1254fd97fd44SMarkus Armbruster          * something else creates the file between our two open() calls.
1255fd97fd44SMarkus Armbruster          */
12568d31d6b6SPavel Fedin     }
12578d31d6b6SPavel Fedin 
1258863e9621SDr. David Alan Gilbert     block->page_size = qemu_fd_getpagesize(fd);
12598360668eSHaozhong Zhang     block->mr->align = block->page_size;
12608360668eSHaozhong Zhang #if defined(__s390x__)
12618360668eSHaozhong Zhang     if (kvm_enabled()) {
12628360668eSHaozhong Zhang         block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
12638360668eSHaozhong Zhang     }
12648360668eSHaozhong Zhang #endif
1265fd97fd44SMarkus Armbruster 
1266863e9621SDr. David Alan Gilbert     if (memory < block->page_size) {
1267fd97fd44SMarkus Armbruster         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1268863e9621SDr. David Alan Gilbert                    "or larger than page size 0x%zx",
1269863e9621SDr. David Alan Gilbert                    memory, block->page_size);
1270f9a49dfaSMarcelo Tosatti         goto error;
1271c902760fSMarcelo Tosatti     }
1272c902760fSMarcelo Tosatti 
1273863e9621SDr. David Alan Gilbert     memory = ROUND_UP(memory, block->page_size);
1274c902760fSMarcelo Tosatti 
1275c902760fSMarcelo Tosatti     /*
1276c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs on older
1277c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1278c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1279c902760fSMarcelo Tosatti      * mmap will fail.
1280c902760fSMarcelo Tosatti      */
12817f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1282c902760fSMarcelo Tosatti         perror("ftruncate");
12837f56e740SPaolo Bonzini     }
1284c902760fSMarcelo Tosatti 
1285d2f39addSDominik Dingel     area = qemu_ram_mmap(fd, memory, block->mr->align,
1286d2f39addSDominik Dingel                          block->flags & RAM_SHARED);
1287c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
12887f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
1289fd97fd44SMarkus Armbruster                          "unable to map backing store for guest RAM");
1290f9a49dfaSMarcelo Tosatti         goto error;
1291c902760fSMarcelo Tosatti     }
1292ef36fa14SMarcelo Tosatti 
1293ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
1294056b68afSIgor Mammedov         os_mem_prealloc(fd, area, memory, errp);
1295056b68afSIgor Mammedov         if (errp && *errp) {
1296056b68afSIgor Mammedov             goto error;
1297056b68afSIgor Mammedov         }
1298ef36fa14SMarcelo Tosatti     }
1299ef36fa14SMarcelo Tosatti 
130004b16653SAlex Williamson     block->fd = fd;
1301c902760fSMarcelo Tosatti     return area;
1302f9a49dfaSMarcelo Tosatti 
1303f9a49dfaSMarcelo Tosatti error:
1304056b68afSIgor Mammedov     if (area != MAP_FAILED) {
1305056b68afSIgor Mammedov         qemu_ram_munmap(area, memory);
1306056b68afSIgor Mammedov     }
1307fd97fd44SMarkus Armbruster     if (unlink_on_error) {
1308fd97fd44SMarkus Armbruster         unlink(path);
1309fd97fd44SMarkus Armbruster     }
13105c3ece79SPaolo Bonzini     if (fd != -1) {
1311fd97fd44SMarkus Armbruster         close(fd);
13125c3ece79SPaolo Bonzini     }
1313f9a49dfaSMarcelo Tosatti     return NULL;
1314c902760fSMarcelo Tosatti }
1315c902760fSMarcelo Tosatti #endif
1316c902760fSMarcelo Tosatti 
13170dc3f44aSMike Day /* Called with the ramlist lock held.  */
1318d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1319d17b5288SAlex Williamson {
132004b16653SAlex Williamson     RAMBlock *block, *next_block;
13213e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
132204b16653SAlex Williamson 
132349cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
132449cd9ac6SStefan Hajnoczi 
13250dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
132604b16653SAlex Williamson         return 0;
13270d53d9feSMike Day     }
132804b16653SAlex Williamson 
13290dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1330f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
133104b16653SAlex Williamson 
133262be4e3aSMichael S. Tsirkin         end = block->offset + block->max_length;
133304b16653SAlex Williamson 
13340dc3f44aSMike Day         QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
133504b16653SAlex Williamson             if (next_block->offset >= end) {
133604b16653SAlex Williamson                 next = MIN(next, next_block->offset);
133704b16653SAlex Williamson             }
133804b16653SAlex Williamson         }
133904b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
134004b16653SAlex Williamson             offset = end;
134104b16653SAlex Williamson             mingap = next - end;
134204b16653SAlex Williamson         }
134304b16653SAlex Williamson     }
13443e837b2cSAlex Williamson 
13453e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
13463e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
13473e837b2cSAlex Williamson                 (uint64_t)size);
13483e837b2cSAlex Williamson         abort();
13493e837b2cSAlex Williamson     }
13503e837b2cSAlex Williamson 
135104b16653SAlex Williamson     return offset;
135204b16653SAlex Williamson }
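
/*
 * Worked example (illustrative): with blocks at [0, 0x1000) and
 * [0x3000, 0x5000), find_ram_offset(0x1000) sees two gaps: 0x2000 bytes
 * at offset 0x1000 and a huge one at 0x5000.  Both fit, but the smaller
 * gap wins (best-fit), so the new block lands at offset 0x1000.
 */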
135304b16653SAlex Williamson 
1354652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
135504b16653SAlex Williamson {
1356d17b5288SAlex Williamson     RAMBlock *block;
1357d17b5288SAlex Williamson     ram_addr_t last = 0;
1358d17b5288SAlex Williamson 
13590dc3f44aSMike Day     rcu_read_lock();
13600dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
136162be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
13620d53d9feSMike Day     }
13630dc3f44aSMike Day     rcu_read_unlock();
1364d17b5288SAlex Williamson     return last;
1365d17b5288SAlex Williamson }
1366d17b5288SAlex Williamson 
1367ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1368ddb97f1dSJason Baron {
1369ddb97f1dSJason Baron     int ret;
1370ddb97f1dSJason Baron 
1371ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
137247c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1373ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1374ddb97f1dSJason Baron         if (ret) {
1375ddb97f1dSJason Baron             perror("qemu_madvise");
1376ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1377ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1378ddb97f1dSJason Baron         }
1379ddb97f1dSJason Baron     }
1380ddb97f1dSJason Baron }
1381ddb97f1dSJason Baron 
1382422148d3SDr. David Alan Gilbert const char *qemu_ram_get_idstr(RAMBlock *rb)
1383422148d3SDr. David Alan Gilbert {
1384422148d3SDr. David Alan Gilbert     return rb->idstr;
1385422148d3SDr. David Alan Gilbert }
1386422148d3SDr. David Alan Gilbert 
1387ae3a7047SMike Day /* Called with iothread lock held.  */
1388fa53a0e5SGonglei void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
138920cfe881SHu Tao {
1390fa53a0e5SGonglei     RAMBlock *block;
139120cfe881SHu Tao 
1392c5705a77SAvi Kivity     assert(new_block);
1393c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
139484b89d78SCam Macdonell 
139509e5ab63SAnthony Liguori     if (dev) {
139609e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
139784b89d78SCam Macdonell         if (id) {
139884b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
13997267c094SAnthony Liguori             g_free(id);
140084b89d78SCam Macdonell         }
140184b89d78SCam Macdonell     }
140284b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
140384b89d78SCam Macdonell 
1404ab0a9956SGonglei     rcu_read_lock();
14050dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1406fa53a0e5SGonglei         if (block != new_block &&
1407fa53a0e5SGonglei             !strcmp(block->idstr, new_block->idstr)) {
140884b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
140984b89d78SCam Macdonell                     new_block->idstr);
141084b89d78SCam Macdonell             abort();
141184b89d78SCam Macdonell         }
141284b89d78SCam Macdonell     }
14130dc3f44aSMike Day     rcu_read_unlock();
1414c5705a77SAvi Kivity }
1415c5705a77SAvi Kivity 
1416ae3a7047SMike Day /* Called with iothread lock held.  */
1417fa53a0e5SGonglei void qemu_ram_unset_idstr(RAMBlock *block)
141820cfe881SHu Tao {
1419ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1420ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1421ae3a7047SMike Day      * does not work anyway.
1422ae3a7047SMike Day      */
142320cfe881SHu Tao     if (block) {
142420cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
142520cfe881SHu Tao     }
142620cfe881SHu Tao }
142720cfe881SHu Tao 
1428863e9621SDr. David Alan Gilbert size_t qemu_ram_pagesize(RAMBlock *rb)
1429863e9621SDr. David Alan Gilbert {
1430863e9621SDr. David Alan Gilbert     return rb->page_size;
1431863e9621SDr. David Alan Gilbert }
1432863e9621SDr. David Alan Gilbert 
14338490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
14348490fc78SLuiz Capitulino {
143575cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
14368490fc78SLuiz Capitulino         /* disabled by the user */
14378490fc78SLuiz Capitulino         return 0;
14388490fc78SLuiz Capitulino     }
14398490fc78SLuiz Capitulino 
14408490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
14418490fc78SLuiz Capitulino }
14428490fc78SLuiz Capitulino 
144362be4e3aSMichael S. Tsirkin /* Only legal before the guest might have detected the memory size: e.g. on
144462be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
144562be4e3aSMichael S. Tsirkin  *
144662be4e3aSMichael S. Tsirkin  * As the memory core doesn't know how the memory is accessed, it is up to
144762be4e3aSMichael S. Tsirkin  * the resize callback to update device state and/or add assertions to detect
144862be4e3aSMichael S. Tsirkin  * misuse, if necessary.
144962be4e3aSMichael S. Tsirkin  */
1450fa53a0e5SGonglei int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
145162be4e3aSMichael S. Tsirkin {
145262be4e3aSMichael S. Tsirkin     assert(block);
145362be4e3aSMichael S. Tsirkin 
14544ed023ceSDr. David Alan Gilbert     newsize = HOST_PAGE_ALIGN(newsize);
1455129ddaf3SMichael S. Tsirkin 
145662be4e3aSMichael S. Tsirkin     if (block->used_length == newsize) {
145762be4e3aSMichael S. Tsirkin         return 0;
145862be4e3aSMichael S. Tsirkin     }
145962be4e3aSMichael S. Tsirkin 
146062be4e3aSMichael S. Tsirkin     if (!(block->flags & RAM_RESIZEABLE)) {
146162be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
146262be4e3aSMichael S. Tsirkin                          "Length mismatch: %s: 0x" RAM_ADDR_FMT
146362be4e3aSMichael S. Tsirkin                          " in != 0x" RAM_ADDR_FMT, block->idstr,
146462be4e3aSMichael S. Tsirkin                          newsize, block->used_length);
146562be4e3aSMichael S. Tsirkin         return -EINVAL;
146662be4e3aSMichael S. Tsirkin     }
146762be4e3aSMichael S. Tsirkin 
146862be4e3aSMichael S. Tsirkin     if (block->max_length < newsize) {
146962be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
147062be4e3aSMichael S. Tsirkin                          "Length too large: %s: 0x" RAM_ADDR_FMT
147162be4e3aSMichael S. Tsirkin                          " > 0x" RAM_ADDR_FMT, block->idstr,
147262be4e3aSMichael S. Tsirkin                          newsize, block->max_length);
147362be4e3aSMichael S. Tsirkin         return -EINVAL;
147462be4e3aSMichael S. Tsirkin     }
147562be4e3aSMichael S. Tsirkin 
147662be4e3aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
147762be4e3aSMichael S. Tsirkin     block->used_length = newsize;
147858d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
147958d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
148062be4e3aSMichael S. Tsirkin     memory_region_set_size(block->mr, newsize);
148162be4e3aSMichael S. Tsirkin     if (block->resized) {
148262be4e3aSMichael S. Tsirkin         block->resized(block->idstr, newsize, block->host);
148362be4e3aSMichael S. Tsirkin     }
148462be4e3aSMichael S. Tsirkin     return 0;
148562be4e3aSMichael S. Tsirkin }
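
/*
 * Usage sketch (illustrative): growing a RAM_RESIZEABLE block, e.g. on
 * incoming migration.  "blk" is a hypothetical block created with
 * qemu_ram_alloc_resizeable().
 */
#if 0
Error *local_err = NULL;

if (qemu_ram_resize(blk, 128 * 1024 * 1024, &local_err) < 0) {
    error_report_err(local_err);
}
#endif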
148662be4e3aSMichael S. Tsirkin 
14875b82b703SStefan Hajnoczi /* Called with ram_list.mutex held */
14885b82b703SStefan Hajnoczi static void dirty_memory_extend(ram_addr_t old_ram_size,
14895b82b703SStefan Hajnoczi                                 ram_addr_t new_ram_size)
14905b82b703SStefan Hajnoczi {
14915b82b703SStefan Hajnoczi     ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
14925b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
14935b82b703SStefan Hajnoczi     ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
14945b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
14955b82b703SStefan Hajnoczi     int i;
14965b82b703SStefan Hajnoczi 
14975b82b703SStefan Hajnoczi     /* Only need to extend if block count increased */
14985b82b703SStefan Hajnoczi     if (new_num_blocks <= old_num_blocks) {
14995b82b703SStefan Hajnoczi         return;
15005b82b703SStefan Hajnoczi     }
15015b82b703SStefan Hajnoczi 
15025b82b703SStefan Hajnoczi     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
15035b82b703SStefan Hajnoczi         DirtyMemoryBlocks *old_blocks;
15045b82b703SStefan Hajnoczi         DirtyMemoryBlocks *new_blocks;
15055b82b703SStefan Hajnoczi         int j;
15065b82b703SStefan Hajnoczi 
15075b82b703SStefan Hajnoczi         old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
15085b82b703SStefan Hajnoczi         new_blocks = g_malloc(sizeof(*new_blocks) +
15095b82b703SStefan Hajnoczi                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
15105b82b703SStefan Hajnoczi 
15115b82b703SStefan Hajnoczi         if (old_num_blocks) {
15125b82b703SStefan Hajnoczi             memcpy(new_blocks->blocks, old_blocks->blocks,
15135b82b703SStefan Hajnoczi                    old_num_blocks * sizeof(old_blocks->blocks[0]));
15145b82b703SStefan Hajnoczi         }
15155b82b703SStefan Hajnoczi 
15165b82b703SStefan Hajnoczi         for (j = old_num_blocks; j < new_num_blocks; j++) {
15175b82b703SStefan Hajnoczi             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
15185b82b703SStefan Hajnoczi         }
15195b82b703SStefan Hajnoczi 
15205b82b703SStefan Hajnoczi         atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
15215b82b703SStefan Hajnoczi 
15225b82b703SStefan Hajnoczi         if (old_blocks) {
15235b82b703SStefan Hajnoczi             g_free_rcu(old_blocks, rcu);
15245b82b703SStefan Hajnoczi         }
15255b82b703SStefan Hajnoczi     }
15265b82b703SStefan Hajnoczi }
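
/*
 * The old bitmap pointers are copied, not freed, so a concurrent reader
 * that fetched the array before the atomic_rcu_set() above keeps using
 * valid bitmaps.  Reader-side sketch (illustrative, modelled on the
 * dirty-bitmap accessors in include/exec/ram_addr.h; "ram_addr" is a
 * placeholder guest RAM address):
 */
#if 0
unsigned long page = ram_addr >> TARGET_PAGE_BITS;
DirtyMemoryBlocks *blocks;

rcu_read_lock();
blocks = atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
set_bit_atomic(page % DIRTY_MEMORY_BLOCK_SIZE,
               blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
rcu_read_unlock();
#endif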
15275b82b703SStefan Hajnoczi 
1528528f46afSFam Zheng static void ram_block_add(RAMBlock *new_block, Error **errp)
1529c5705a77SAvi Kivity {
1530e1c57ab8SPaolo Bonzini     RAMBlock *block;
15310d53d9feSMike Day     RAMBlock *last_block = NULL;
15322152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
153337aa7a0eSMarkus Armbruster     Error *err = NULL;
15342152f5caSJuan Quintela 
15352152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1536c5705a77SAvi Kivity 
1537b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
15389b8424d5SMichael S. Tsirkin     new_block->offset = find_ram_offset(new_block->max_length);
1539e1c57ab8SPaolo Bonzini 
15400628c182SMarkus Armbruster     if (!new_block->host) {
1541e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
15429b8424d5SMichael S. Tsirkin             xen_ram_alloc(new_block->offset, new_block->max_length,
154337aa7a0eSMarkus Armbruster                           new_block->mr, &err);
154437aa7a0eSMarkus Armbruster             if (err) {
154537aa7a0eSMarkus Armbruster                 error_propagate(errp, err);
154637aa7a0eSMarkus Armbruster                 qemu_mutex_unlock_ramlist();
154739c350eeSPaolo Bonzini                 return;
154837aa7a0eSMarkus Armbruster             }
1549e1c57ab8SPaolo Bonzini         } else {
15509b8424d5SMichael S. Tsirkin             new_block->host = phys_mem_alloc(new_block->max_length,
1551a2b257d6SIgor Mammedov                                              &new_block->mr->align);
155239228250SMarkus Armbruster             if (!new_block->host) {
1553ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1554ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1555ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1556ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
155739c350eeSPaolo Bonzini                 return;
155839228250SMarkus Armbruster             }
15599b8424d5SMichael S. Tsirkin             memory_try_enable_merging(new_block->host, new_block->max_length);
1560c902760fSMarcelo Tosatti         }
15616977dfe6SYoshiaki Tamura     }
156294a6b54fSpbrook 
1563dd631697SLi Zhijian     new_ram_size = MAX(old_ram_size,
1564dd631697SLi Zhijian               (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1565dd631697SLi Zhijian     if (new_ram_size > old_ram_size) {
1566dd631697SLi Zhijian         migration_bitmap_extend(old_ram_size, new_ram_size);
15675b82b703SStefan Hajnoczi         dirty_memory_extend(old_ram_size, new_ram_size);
1568dd631697SLi Zhijian     }
15690d53d9feSMike Day     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
15700d53d9feSMike Day      * QLIST (which has an RCU-friendly variant) does not have insertion at
15710d53d9feSMike Day      * tail, so save the last element in last_block.
15720d53d9feSMike Day      */
15730dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
15740d53d9feSMike Day         last_block = block;
15759b8424d5SMichael S. Tsirkin         if (block->max_length < new_block->max_length) {
1576abb26d63SPaolo Bonzini             break;
1577abb26d63SPaolo Bonzini         }
1578abb26d63SPaolo Bonzini     }
1579abb26d63SPaolo Bonzini     if (block) {
15800dc3f44aSMike Day         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
15810d53d9feSMike Day     } else if (last_block) {
15820dc3f44aSMike Day         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
15830d53d9feSMike Day     } else { /* list is empty */
15840dc3f44aSMike Day         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1585abb26d63SPaolo Bonzini     }
15860d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
158794a6b54fSpbrook 
15880dc3f44aSMike Day     /* Write list before version */
15890dc3f44aSMike Day     smp_wmb();
1590f798b07fSUmesh Deshpande     ram_list.version++;
1591b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1592f798b07fSUmesh Deshpande 
15939b8424d5SMichael S. Tsirkin     cpu_physical_memory_set_dirty_range(new_block->offset,
159458d2707eSPaolo Bonzini                                         new_block->used_length,
159558d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
159694a6b54fSpbrook 
1597a904c911SPaolo Bonzini     if (new_block->host) {
15989b8424d5SMichael S. Tsirkin         qemu_ram_setup_dump(new_block->host, new_block->max_length);
15999b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1600c2cd627dSCao jin         /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
16019b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1602a904c911SPaolo Bonzini     }
160394a6b54fSpbrook }
1604e9a1ab19Sbellard 
16050b183fc8SPaolo Bonzini #ifdef __linux__
1606528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1607dbcb8981SPaolo Bonzini                                    bool share, const char *mem_path,
16087f56e740SPaolo Bonzini                                    Error **errp)
1609e1c57ab8SPaolo Bonzini {
1610e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1611ef701d7bSHu Tao     Error *local_err = NULL;
1612e1c57ab8SPaolo Bonzini 
1613e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
16147f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
1615528f46afSFam Zheng         return NULL;
1616e1c57ab8SPaolo Bonzini     }
1617e1c57ab8SPaolo Bonzini 
1618e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1619e1c57ab8SPaolo Bonzini         /*
1620e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1621e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1622e1c57ab8SPaolo Bonzini          * a hook there.
1623e1c57ab8SPaolo Bonzini          */
16247f56e740SPaolo Bonzini         error_setg(errp,
16257f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
1626528f46afSFam Zheng         return NULL;
1627e1c57ab8SPaolo Bonzini     }
1628e1c57ab8SPaolo Bonzini 
16294ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
1630e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1631e1c57ab8SPaolo Bonzini     new_block->mr = mr;
16329b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16339b8424d5SMichael S. Tsirkin     new_block->max_length = size;
1634dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
16357f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
16367f56e740SPaolo Bonzini                                      mem_path, errp);
16377f56e740SPaolo Bonzini     if (!new_block->host) {
16387f56e740SPaolo Bonzini         g_free(new_block);
1639528f46afSFam Zheng         return NULL;
16407f56e740SPaolo Bonzini     }
16417f56e740SPaolo Bonzini 
1642528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1643ef701d7bSHu Tao     if (local_err) {
1644ef701d7bSHu Tao         g_free(new_block);
1645ef701d7bSHu Tao         error_propagate(errp, local_err);
1646528f46afSFam Zheng         return NULL;
1647ef701d7bSHu Tao     }
1648528f46afSFam Zheng     return new_block;
1649e1c57ab8SPaolo Bonzini }
16500b183fc8SPaolo Bonzini #endif
1651e1c57ab8SPaolo Bonzini 
165262be4e3aSMichael S. Tsirkin static
1653528f46afSFam Zheng RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
165462be4e3aSMichael S. Tsirkin                                   void (*resized)(const char*,
165562be4e3aSMichael S. Tsirkin                                                   uint64_t length,
165662be4e3aSMichael S. Tsirkin                                                   void *host),
165762be4e3aSMichael S. Tsirkin                                   void *host, bool resizeable,
1658ef701d7bSHu Tao                                   MemoryRegion *mr, Error **errp)
1659e1c57ab8SPaolo Bonzini {
1660e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1661ef701d7bSHu Tao     Error *local_err = NULL;
1662e1c57ab8SPaolo Bonzini 
16634ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
16644ed023ceSDr. David Alan Gilbert     max_size = HOST_PAGE_ALIGN(max_size);
1665e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1666e1c57ab8SPaolo Bonzini     new_block->mr = mr;
166762be4e3aSMichael S. Tsirkin     new_block->resized = resized;
16689b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16699b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
167062be4e3aSMichael S. Tsirkin     assert(max_size >= size);
1671e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1672863e9621SDr. David Alan Gilbert     new_block->page_size = getpagesize();
1673e1c57ab8SPaolo Bonzini     new_block->host = host;
1674e1c57ab8SPaolo Bonzini     if (host) {
16757bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1676e1c57ab8SPaolo Bonzini     }
167762be4e3aSMichael S. Tsirkin     if (resizeable) {
167862be4e3aSMichael S. Tsirkin         new_block->flags |= RAM_RESIZEABLE;
167962be4e3aSMichael S. Tsirkin     }
1680528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1681ef701d7bSHu Tao     if (local_err) {
1682ef701d7bSHu Tao         g_free(new_block);
1683ef701d7bSHu Tao         error_propagate(errp, local_err);
1684528f46afSFam Zheng         return NULL;
1685ef701d7bSHu Tao     }
1686528f46afSFam Zheng     return new_block;
1687e1c57ab8SPaolo Bonzini }
1688e1c57ab8SPaolo Bonzini 
1689528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
169062be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
169162be4e3aSMichael S. Tsirkin {
169262be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
169362be4e3aSMichael S. Tsirkin }
169462be4e3aSMichael S. Tsirkin 
1695528f46afSFam Zheng RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
16966977dfe6SYoshiaki Tamura {
169762be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
169862be4e3aSMichael S. Tsirkin }
169962be4e3aSMichael S. Tsirkin 
1700528f46afSFam Zheng RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
170162be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
170262be4e3aSMichael S. Tsirkin                                                      uint64_t length,
170362be4e3aSMichael S. Tsirkin                                                      void *host),
170462be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
170562be4e3aSMichael S. Tsirkin {
170662be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
17076977dfe6SYoshiaki Tamura }
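
/*
 * Usage sketch (illustrative): most callers reach these through the
 * memory region API (e.g. memory_region_init_ram()) rather than calling
 * them directly.  "mr" and "resized_cb" below are invented placeholders.
 */
#if 0
RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,  /* used size */
                                         64 * 1024 * 1024,  /* max size  */
                                         resized_cb, mr, &error_fatal);
#endif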
17086977dfe6SYoshiaki Tamura 
170943771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block)
1710e9a1ab19Sbellard {
17117bd4f430SPaolo Bonzini     if (block->flags & RAM_PREALLOC) {
1712cd19cfa2SHuang Ying         ;
1713dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
1714dfeaf2abSMarkus Armbruster         xen_invalidate_map_cache_entry(block->host);
1715089f3f76SStefan Weil #ifndef _WIN32
17163435f395SMarkus Armbruster     } else if (block->fd >= 0) {
1717794e8f30SMichael S. Tsirkin         qemu_ram_munmap(block->host, block->max_length);
171804b16653SAlex Williamson         close(block->fd);
1719089f3f76SStefan Weil #endif
172004b16653SAlex Williamson     } else {
17219b8424d5SMichael S. Tsirkin         qemu_anon_ram_free(block->host, block->max_length);
172204b16653SAlex Williamson     }
17237267c094SAnthony Liguori     g_free(block);
172443771539SPaolo Bonzini }
172543771539SPaolo Bonzini 
1726f1060c55SFam Zheng void qemu_ram_free(RAMBlock *block)
172743771539SPaolo Bonzini {
172885bc2a15SMarc-André Lureau     if (!block) {
172985bc2a15SMarc-André Lureau         return;
173085bc2a15SMarc-André Lureau     }
173185bc2a15SMarc-André Lureau 
173243771539SPaolo Bonzini     qemu_mutex_lock_ramlist();
17330dc3f44aSMike Day     QLIST_REMOVE_RCU(block, next);
173443771539SPaolo Bonzini     ram_list.mru_block = NULL;
17350dc3f44aSMike Day     /* Write list before version */
17360dc3f44aSMike Day     smp_wmb();
173743771539SPaolo Bonzini     ram_list.version++;
173843771539SPaolo Bonzini     call_rcu(block, reclaim_ramblock, rcu);
1739b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1740e9a1ab19Sbellard }
1741e9a1ab19Sbellard 
1742cd19cfa2SHuang Ying #ifndef _WIN32
1743cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1744cd19cfa2SHuang Ying {
1745cd19cfa2SHuang Ying     RAMBlock *block;
1746cd19cfa2SHuang Ying     ram_addr_t offset;
1747cd19cfa2SHuang Ying     int flags;
1748cd19cfa2SHuang Ying     void *area, *vaddr;
1749cd19cfa2SHuang Ying 
17500dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1751cd19cfa2SHuang Ying         offset = addr - block->offset;
17529b8424d5SMichael S. Tsirkin         if (offset < block->max_length) {
17531240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
17547bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1755cd19cfa2SHuang Ying                 ;
1756dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1757dfeaf2abSMarkus Armbruster                 abort();
1758cd19cfa2SHuang Ying             } else {
1759cd19cfa2SHuang Ying                 flags = MAP_FIXED;
17603435f395SMarkus Armbruster                 if (block->fd >= 0) {
1761dbcb8981SPaolo Bonzini                     flags |= (block->flags & RAM_SHARED ?
1762dbcb8981SPaolo Bonzini                               MAP_SHARED : MAP_PRIVATE);
1763cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1764cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1765cd19cfa2SHuang Ying                 } else {
17662eb9fbaaSMarkus Armbruster                     /*
17672eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
17682eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
17692eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
17702eb9fbaaSMarkus Armbruster                      */
17712eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
17722eb9fbaaSMarkus Armbruster 
1773cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1774cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1775cd19cfa2SHuang Ying                                 flags, -1, 0);
1776cd19cfa2SHuang Ying                 }
1777cd19cfa2SHuang Ying                 if (area != vaddr) {
1778f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1779f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1780cd19cfa2SHuang Ying                             length, addr);
1781cd19cfa2SHuang Ying                     exit(1);
1782cd19cfa2SHuang Ying                 }
17838490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1784ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1785cd19cfa2SHuang Ying             }
1786cd19cfa2SHuang Ying         }
1787cd19cfa2SHuang Ying     }
1788cd19cfa2SHuang Ying }
1789cd19cfa2SHuang Ying #endif /* !_WIN32 */
1790cd19cfa2SHuang Ying 
17911b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
1792ae3a7047SMike Day  * This should not be used for general purpose DMA.  Use address_space_map
1793ae3a7047SMike Day  * or address_space_rw instead. For local memory (e.g. video ram) that the
1794ae3a7047SMike Day  * device owns, use memory_region_get_ram_ptr.
17950dc3f44aSMike Day  *
179649b24afcSPaolo Bonzini  * Called within RCU critical section.
17971b5ec234SPaolo Bonzini  */
17980878d0e1SPaolo Bonzini void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
17991b5ec234SPaolo Bonzini {
18003655cb9cSGonglei     RAMBlock *block = ram_block;
18013655cb9cSGonglei 
18023655cb9cSGonglei     if (block == NULL) {
18033655cb9cSGonglei         block = qemu_get_ram_block(addr);
18040878d0e1SPaolo Bonzini         addr -= block->offset;
18053655cb9cSGonglei     }
1806ae3a7047SMike Day 
1807ae3a7047SMike Day     if (xen_enabled() && block->host == NULL) {
1808432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM block
1809432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1810712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1811432d268cSJun Nakajima          */
1812432d268cSJun Nakajima         if (block->offset == 0) {
181349b24afcSPaolo Bonzini             return xen_map_cache(addr, 0, 0);
1814432d268cSJun Nakajima         }
1815ae3a7047SMike Day 
1816ae3a7047SMike Day         block->host = xen_map_cache(block->offset, block->max_length, 1);
1817432d268cSJun Nakajima     }
18180878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
181994a6b54fSpbrook }
1820f471a17eSAlex Williamson 
18210878d0e1SPaolo Bonzini /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1822ae3a7047SMike Day  * but takes a size argument.
18230dc3f44aSMike Day  *
1824e81bcda5SPaolo Bonzini  * Called within RCU critical section.
1825ae3a7047SMike Day  */
18263655cb9cSGonglei static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
18273655cb9cSGonglei                                  hwaddr *size)
182838bee5dcSStefano Stabellini {
18293655cb9cSGonglei     RAMBlock *block = ram_block;
18308ab934f9SStefano Stabellini     if (*size == 0) {
18318ab934f9SStefano Stabellini         return NULL;
18328ab934f9SStefano Stabellini     }
1833e81bcda5SPaolo Bonzini 
18343655cb9cSGonglei     if (block == NULL) {
1835e81bcda5SPaolo Bonzini         block = qemu_get_ram_block(addr);
18360878d0e1SPaolo Bonzini         addr -= block->offset;
18373655cb9cSGonglei     }
18380878d0e1SPaolo Bonzini     *size = MIN(*size, block->max_length - addr);
1839e81bcda5SPaolo Bonzini 
1840e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
1841e81bcda5SPaolo Bonzini         /* We need to check if the requested address is in the RAM block
1842e81bcda5SPaolo Bonzini          * because we don't want to map the entire memory in QEMU.
1843e81bcda5SPaolo Bonzini          * In that case just map the requested area.
1844e81bcda5SPaolo Bonzini          */
1845e81bcda5SPaolo Bonzini         if (block->offset == 0) {
1846e41d7c69SJan Kiszka             return xen_map_cache(addr, *size, 1);
184738bee5dcSStefano Stabellini         }
184838bee5dcSStefano Stabellini 
1849e81bcda5SPaolo Bonzini         block->host = xen_map_cache(block->offset, block->max_length, 1);
185038bee5dcSStefano Stabellini     }
1851e81bcda5SPaolo Bonzini 
18520878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
185338bee5dcSStefano Stabellini }
185438bee5dcSStefano Stabellini 
1855422148d3SDr. David Alan Gilbert /*
1856422148d3SDr. David Alan Gilbert  * Translates a host ptr back to a RAMBlock and an offset in that
1857422148d3SDr. David Alan Gilbert  * RAMBlock.
1858422148d3SDr. David Alan Gilbert  *
1859422148d3SDr. David Alan Gilbert  * ptr: Host pointer to look up
1860422148d3SDr. David Alan Gilbert  * round_offset: If true, round the result offset down to a page boundary
1862422148d3SDr. David Alan Gilbert  * *offset: set to result offset within the RAMBlock
1863422148d3SDr. David Alan Gilbert  *
1864422148d3SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1865ae3a7047SMike Day  *
1866ae3a7047SMike Day  * By the time this function returns, the returned pointer is not protected
1867ae3a7047SMike Day  * by RCU anymore.  If the caller is not within an RCU critical section and
1868ae3a7047SMike Day  * does not hold the iothread lock, it must have other means of protecting the
1869ae3a7047SMike Day  * pointer, such as a reference to the region that includes the incoming
1870ae3a7047SMike Day  * ram_addr_t.
1871ae3a7047SMike Day  */
1872422148d3SDr. David Alan Gilbert RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1873422148d3SDr. David Alan Gilbert                                    ram_addr_t *offset)
18745579c7f3Spbrook {
187594a6b54fSpbrook     RAMBlock *block;
187694a6b54fSpbrook     uint8_t *host = ptr;
187794a6b54fSpbrook 
1878868bb33fSJan Kiszka     if (xen_enabled()) {
1879f615f396SPaolo Bonzini         ram_addr_t ram_addr;
18800dc3f44aSMike Day         rcu_read_lock();
1881f615f396SPaolo Bonzini         ram_addr = xen_ram_addr_from_mapcache(ptr);
1882f615f396SPaolo Bonzini         block = qemu_get_ram_block(ram_addr);
1883422148d3SDr. David Alan Gilbert         if (block) {
1884d6b6aec4SAnthony PERARD             *offset = ram_addr - block->offset;
1885422148d3SDr. David Alan Gilbert         }
18860dc3f44aSMike Day         rcu_read_unlock();
1887422148d3SDr. David Alan Gilbert         return block;
1888712c2b41SStefano Stabellini     }
1889712c2b41SStefano Stabellini 
18900dc3f44aSMike Day     rcu_read_lock();
18910dc3f44aSMike Day     block = atomic_rcu_read(&ram_list.mru_block);
18929b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
189323887b79SPaolo Bonzini         goto found;
189423887b79SPaolo Bonzini     }
189523887b79SPaolo Bonzini 
18960dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1897432d268cSJun Nakajima         /* This can happen when the block is not mapped. */
1898432d268cSJun Nakajima         if (block->host == NULL) {
1899432d268cSJun Nakajima             continue;
1900432d268cSJun Nakajima         }
19019b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
190223887b79SPaolo Bonzini             goto found;
190394a6b54fSpbrook         }
1904f471a17eSAlex Williamson     }
1905432d268cSJun Nakajima 
19060dc3f44aSMike Day     rcu_read_unlock();
19071b5ec234SPaolo Bonzini     return NULL;
190823887b79SPaolo Bonzini 
190923887b79SPaolo Bonzini found:
1910422148d3SDr. David Alan Gilbert     *offset = (host - block->host);
1911422148d3SDr. David Alan Gilbert     if (round_offset) {
1912422148d3SDr. David Alan Gilbert         *offset &= TARGET_PAGE_MASK;
1913422148d3SDr. David Alan Gilbert     }
19140dc3f44aSMike Day     rcu_read_unlock();
1915422148d3SDr. David Alan Gilbert     return block;
1916422148d3SDr. David Alan Gilbert }
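
/*
 * Usage sketch (illustrative): "host_ptr" stands in for a pointer
 * previously obtained from guest RAM, e.g. via qemu_map_ram_ptr().
 */
#if 0
ram_addr_t offset;
RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);

if (rb) {
    /* offset is host_ptr's page-aligned offset within rb */
}
#endif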
1917422148d3SDr. David Alan Gilbert 
1918e3dd7493SDr. David Alan Gilbert /*
1919e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
1920e3dd7493SDr. David Alan Gilbert  *
1921e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
1922e3dd7493SDr. David Alan Gilbert  *
1923e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1924e3dd7493SDr. David Alan Gilbert  */
1925e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
1926e3dd7493SDr. David Alan Gilbert {
1927e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
1928e3dd7493SDr. David Alan Gilbert 
1929e3dd7493SDr. David Alan Gilbert     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1930e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
1931e3dd7493SDr. David Alan Gilbert             return block;
1932e3dd7493SDr. David Alan Gilbert         }
1933e3dd7493SDr. David Alan Gilbert     }
1934e3dd7493SDr. David Alan Gilbert 
1935e3dd7493SDr. David Alan Gilbert     return NULL;
1936e3dd7493SDr. David Alan Gilbert }
1937e3dd7493SDr. David Alan Gilbert 
1938422148d3SDr. David Alan Gilbert /* Some of the softmmu routines need to translate from a host pointer
1939422148d3SDr. David Alan Gilbert    (typically a TLB entry) back to a ram offset.  */
194007bdaa41SPaolo Bonzini ram_addr_t qemu_ram_addr_from_host(void *ptr)
1941422148d3SDr. David Alan Gilbert {
1942422148d3SDr. David Alan Gilbert     RAMBlock *block;
1943f615f396SPaolo Bonzini     ram_addr_t offset;
1944422148d3SDr. David Alan Gilbert 
1945f615f396SPaolo Bonzini     block = qemu_ram_block_from_host(ptr, false, &offset);
1946422148d3SDr. David Alan Gilbert     if (!block) {
194707bdaa41SPaolo Bonzini         return RAM_ADDR_INVALID;
1948422148d3SDr. David Alan Gilbert     }
1949422148d3SDr. David Alan Gilbert 
195007bdaa41SPaolo Bonzini     return block->offset + offset;
1951e890261fSMarcelo Tosatti }
1952f471a17eSAlex Williamson 
195349b24afcSPaolo Bonzini /* Called within RCU critical section.  */
1954a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
19550e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
19561ccde1cbSbellard {
195752159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
19580e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
19593a7d929eSbellard     }
19600e0df1e2SAvi Kivity     switch (size) {
19610e0df1e2SAvi Kivity     case 1:
19620878d0e1SPaolo Bonzini         stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19630e0df1e2SAvi Kivity         break;
19640e0df1e2SAvi Kivity     case 2:
19650878d0e1SPaolo Bonzini         stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19660e0df1e2SAvi Kivity         break;
19670e0df1e2SAvi Kivity     case 4:
19680878d0e1SPaolo Bonzini         stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19690e0df1e2SAvi Kivity         break;
19700e0df1e2SAvi Kivity     default:
19710e0df1e2SAvi Kivity         abort();
19720e0df1e2SAvi Kivity     }
197358d2707eSPaolo Bonzini     /* Set both VGA and migration bits for simplicity and to remove
197458d2707eSPaolo Bonzini      * the notdirty callback faster.
197558d2707eSPaolo Bonzini      */
197658d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(ram_addr, size,
197758d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_NOCODE);
1978f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1979f23db169Sbellard        flushed */
1980a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
1981bcae01e4SPeter Crosthwaite         tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
19824917cf44SAndreas Färber     }
19831ccde1cbSbellard }
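
/*
 * Flow note: a store to a clean code page is routed here via TLB_NOTDIRTY
 * instead of writing RAM directly.  The handler above invalidates any TBs
 * for the page, performs the store by hand, marks the page dirty, and,
 * once the page is no longer clean, tlb_set_dirty() rewrites the TLB
 * entry so later stores skip this detour.
 */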
19841ccde1cbSbellard 
1985b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1986b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1987b018ddf6SPaolo Bonzini {
1988b018ddf6SPaolo Bonzini     return is_write;
1989b018ddf6SPaolo Bonzini }
1990b018ddf6SPaolo Bonzini 
19910e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
19920e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1993b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
19940e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
19951ccde1cbSbellard };
19961ccde1cbSbellard 
19970f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
199866b9b43cSPeter Maydell static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
19990f459d16Spbrook {
200093afeadeSAndreas Färber     CPUState *cpu = current_cpu;
2001568496c0SSergey Fedorov     CPUClass *cc = CPU_GET_CLASS(cpu);
200293afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
200306d55cc1Saliguori     target_ulong pc, cs_base;
20040f459d16Spbrook     target_ulong vaddr;
2005a1d1bb31Saliguori     CPUWatchpoint *wp;
200689fee74aSEmilio G. Cota     uint32_t cpu_flags;
20070f459d16Spbrook 
2008ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
200906d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
201006d55cc1Saliguori          * the debug interrupt so that it will trigger after the
201106d55cc1Saliguori          * current instruction. */
201293afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
201306d55cc1Saliguori         return;
201406d55cc1Saliguori     }
201593afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2016ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
201705068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
201805068c0dSPeter Maydell             && (wp->flags & flags)) {
201908225676SPeter Maydell             if (flags == BP_MEM_READ) {
202008225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
202108225676SPeter Maydell             } else {
202208225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
202308225676SPeter Maydell             }
202408225676SPeter Maydell             wp->hitaddr = vaddr;
202566b9b43cSPeter Maydell             wp->hitattrs = attrs;
2026ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
2027568496c0SSergey Fedorov                 if (wp->flags & BP_CPU &&
2028568496c0SSergey Fedorov                     !cc->debug_check_watchpoint(cpu, wp)) {
2029568496c0SSergey Fedorov                     wp->flags &= ~BP_WATCHPOINT_HIT;
2030568496c0SSergey Fedorov                     continue;
2031568496c0SSergey Fedorov                 }
2032ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
2033239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
203406d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
203527103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
20365638d180SAndreas Färber                     cpu_loop_exit(cpu);
203706d55cc1Saliguori                 } else {
203806d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2039648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
20406886b980SPeter Maydell                     cpu_loop_exit_noexc(cpu);
20410f459d16Spbrook                 }
2042488d6577SMax Filippov             }
20436e140f28Saliguori         } else {
20446e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
20456e140f28Saliguori         }
20460f459d16Spbrook     }
20470f459d16Spbrook }
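
/* Illustrative sketch (not part of this file): a watchpoint that the
 * routine above fires for is typically installed via the public API,
 * e.g. from the gdbstub.  The address and flags below are made up:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, 0x1000, 4,
 *                               BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         ... watchpoint could not be inserted ...
 *     }
 *
 * Pages covered by a watchpoint are forced through the slow path by the
 * TLB, which is what routes guest accesses into check_watchpoint().
 */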
20480f459d16Spbrook 
20496658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
20506658ffb8Spbrook    so these check for a hit and then pass through to the normal out-of-line
20516658ffb8Spbrook    phys routines.  */
205266b9b43cSPeter Maydell static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
205366b9b43cSPeter Maydell                                   unsigned size, MemTxAttrs attrs)
20546658ffb8Spbrook {
205566b9b43cSPeter Maydell     MemTxResult res;
205666b9b43cSPeter Maydell     uint64_t data;
205779ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
205879ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
20596658ffb8Spbrook 
206066b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
20611ec9b909SAvi Kivity     switch (size) {
206267364150SMax Filippov     case 1:
206379ed0416SPeter Maydell         data = address_space_ldub(as, addr, attrs, &res);
206467364150SMax Filippov         break;
206567364150SMax Filippov     case 2:
206679ed0416SPeter Maydell         data = address_space_lduw(as, addr, attrs, &res);
206767364150SMax Filippov         break;
206867364150SMax Filippov     case 4:
206979ed0416SPeter Maydell         data = address_space_ldl(as, addr, attrs, &res);
207067364150SMax Filippov         break;
20711ec9b909SAvi Kivity     default: abort();
20721ec9b909SAvi Kivity     }
207366b9b43cSPeter Maydell     *pdata = data;
207466b9b43cSPeter Maydell     return res;
207566b9b43cSPeter Maydell }
207666b9b43cSPeter Maydell 
207766b9b43cSPeter Maydell static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
207866b9b43cSPeter Maydell                                    uint64_t val, unsigned size,
207966b9b43cSPeter Maydell                                    MemTxAttrs attrs)
208066b9b43cSPeter Maydell {
208166b9b43cSPeter Maydell     MemTxResult res;
208279ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
208379ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
208466b9b43cSPeter Maydell 
208566b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
208666b9b43cSPeter Maydell     switch (size) {
208766b9b43cSPeter Maydell     case 1:
208879ed0416SPeter Maydell         address_space_stb(as, addr, val, attrs, &res);
208966b9b43cSPeter Maydell         break;
209066b9b43cSPeter Maydell     case 2:
209179ed0416SPeter Maydell         address_space_stw(as, addr, val, attrs, &res);
209266b9b43cSPeter Maydell         break;
209366b9b43cSPeter Maydell     case 4:
209479ed0416SPeter Maydell         address_space_stl(as, addr, val, attrs, &res);
209566b9b43cSPeter Maydell         break;
209666b9b43cSPeter Maydell     default: abort();
209766b9b43cSPeter Maydell     }
209866b9b43cSPeter Maydell     return res;
20996658ffb8Spbrook }
21006658ffb8Spbrook 
21011ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
210266b9b43cSPeter Maydell     .read_with_attrs = watch_mem_read,
210366b9b43cSPeter Maydell     .write_with_attrs = watch_mem_write,
21041ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
21056658ffb8Spbrook };
21066658ffb8Spbrook 
2107f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2108f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2109db7b5426Sblueswir1 {
2110acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2111ff6cff75SPaolo Bonzini     uint8_t buf[8];
21125c9eb028SPeter Maydell     MemTxResult res;
2113791af8c8SPaolo Bonzini 
2114db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2115016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2116acc9d80bSJan Kiszka            subpage, len, addr);
2117db7b5426Sblueswir1 #endif
21185c9eb028SPeter Maydell     res = address_space_read(subpage->as, addr + subpage->base,
21195c9eb028SPeter Maydell                              attrs, buf, len);
21205c9eb028SPeter Maydell     if (res) {
21215c9eb028SPeter Maydell         return res;
2122f25a49e0SPeter Maydell     }
2123acc9d80bSJan Kiszka     switch (len) {
2124acc9d80bSJan Kiszka     case 1:
2125f25a49e0SPeter Maydell         *data = ldub_p(buf);
2126f25a49e0SPeter Maydell         return MEMTX_OK;
2127acc9d80bSJan Kiszka     case 2:
2128f25a49e0SPeter Maydell         *data = lduw_p(buf);
2129f25a49e0SPeter Maydell         return MEMTX_OK;
2130acc9d80bSJan Kiszka     case 4:
2131f25a49e0SPeter Maydell         *data = ldl_p(buf);
2132f25a49e0SPeter Maydell         return MEMTX_OK;
2133ff6cff75SPaolo Bonzini     case 8:
2134f25a49e0SPeter Maydell         *data = ldq_p(buf);
2135f25a49e0SPeter Maydell         return MEMTX_OK;
2136acc9d80bSJan Kiszka     default:
2137acc9d80bSJan Kiszka         abort();
2138acc9d80bSJan Kiszka     }
2139db7b5426Sblueswir1 }
2140db7b5426Sblueswir1 
2141f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2142f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2143db7b5426Sblueswir1 {
2144acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2145ff6cff75SPaolo Bonzini     uint8_t buf[8];
2146acc9d80bSJan Kiszka 
2147db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2148016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2149acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2150acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2151db7b5426Sblueswir1 #endif
2152acc9d80bSJan Kiszka     switch (len) {
2153acc9d80bSJan Kiszka     case 1:
2154acc9d80bSJan Kiszka         stb_p(buf, value);
2155acc9d80bSJan Kiszka         break;
2156acc9d80bSJan Kiszka     case 2:
2157acc9d80bSJan Kiszka         stw_p(buf, value);
2158acc9d80bSJan Kiszka         break;
2159acc9d80bSJan Kiszka     case 4:
2160acc9d80bSJan Kiszka         stl_p(buf, value);
2161acc9d80bSJan Kiszka         break;
2162ff6cff75SPaolo Bonzini     case 8:
2163ff6cff75SPaolo Bonzini         stq_p(buf, value);
2164ff6cff75SPaolo Bonzini         break;
2165acc9d80bSJan Kiszka     default:
2166acc9d80bSJan Kiszka         abort();
2167acc9d80bSJan Kiszka     }
21685c9eb028SPeter Maydell     return address_space_write(subpage->as, addr + subpage->base,
21695c9eb028SPeter Maydell                                attrs, buf, len);
2170db7b5426Sblueswir1 }
2171db7b5426Sblueswir1 
2172c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
2173016e9d62SAmos Kong                             unsigned len, bool is_write)
2174c353e4ccSPaolo Bonzini {
2175acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2176c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2177016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2178acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2179c353e4ccSPaolo Bonzini #endif
2180c353e4ccSPaolo Bonzini 
2181acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
2182016e9d62SAmos Kong                                       len, is_write);
2183c353e4ccSPaolo Bonzini }
2184c353e4ccSPaolo Bonzini 
218570c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2186f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2187f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2188ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2189ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2190ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2191ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2192c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
219370c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2194db7b5426Sblueswir1 };
2195db7b5426Sblueswir1 
2196c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
21975312bd8bSAvi Kivity                              uint16_t section)
2198db7b5426Sblueswir1 {
2199db7b5426Sblueswir1     int idx, eidx;
2200db7b5426Sblueswir1 
2201db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2202db7b5426Sblueswir1         return -1;
2203db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2204db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2205db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2206016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2207016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2208db7b5426Sblueswir1 #endif
2209db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
22105312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2211db7b5426Sblueswir1     }
2212db7b5426Sblueswir1 
2213db7b5426Sblueswir1     return 0;
2214db7b5426Sblueswir1 }
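
/* Worked example (hypothetical values): registering the byte range
 * [0x100, 0x2ff] of a page records one section index per byte offset:
 *
 *     subpage_register(mmio, 0x100, 0x2ff, section);
 *     // idx = SUBPAGE_IDX(0x100) = 0x100, eidx = SUBPAGE_IDX(0x2ff) = 0x2ff
 *     // => mmio->sub_section[0x100..0x2ff] = section
 *
 * Later accesses to those offsets are dispatched through subpage_read()
 * and subpage_write() to the registered section.
 */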
2215db7b5426Sblueswir1 
2216acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2217db7b5426Sblueswir1 {
2218c227f099SAnthony Liguori     subpage_t *mmio;
2219db7b5426Sblueswir1 
22207267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
22211eec614bSaliguori 
2222acc9d80bSJan Kiszka     mmio->as = as;
2223db7b5426Sblueswir1     mmio->base = base;
22242c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2225b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2226b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2227db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2228016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2229016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2230db7b5426Sblueswir1 #endif
2231b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2232db7b5426Sblueswir1 
2233db7b5426Sblueswir1     return mmio;
2234db7b5426Sblueswir1 }
2235db7b5426Sblueswir1 
2236a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2237a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
22385312bd8bSAvi Kivity {
2239a656e22fSPeter Crosthwaite     assert(as);
22405312bd8bSAvi Kivity     MemoryRegionSection section = {
2241a656e22fSPeter Crosthwaite         .address_space = as,
22425312bd8bSAvi Kivity         .mr = mr,
22435312bd8bSAvi Kivity         .offset_within_address_space = 0,
22445312bd8bSAvi Kivity         .offset_within_region = 0,
2245052e87b0SPaolo Bonzini         .size = int128_2_64(),
22465312bd8bSAvi Kivity     };
22475312bd8bSAvi Kivity 
224853cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
22495312bd8bSAvi Kivity }
22505312bd8bSAvi Kivity 
2251a54c87b6SPeter Maydell MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2252aa102231SAvi Kivity {
2253a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2254a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
225532857f4dSPeter Maydell     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
225679e2b9aeSPaolo Bonzini     MemoryRegionSection *sections = d->map.sections;
22579d82b5a7SPaolo Bonzini 
22589d82b5a7SPaolo Bonzini     return sections[index & ~TARGET_PAGE_MASK].mr;
2259aa102231SAvi Kivity }
2260aa102231SAvi Kivity 
2261e9179ce1SAvi Kivity static void io_mem_init(void)
2262e9179ce1SAvi Kivity {
22631f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
22642c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
22651f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22662c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
22671f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22682c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
22691f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2270e9179ce1SAvi Kivity }
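
/* Minimal sketch (assumed names, not part of QEMU) of the same pattern
 * used above, for a device-specific I/O region:
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         return 0;            // a real device decodes addr/size here
 *     }
 *
 *     static void my_dev_write(void *opaque, hwaddr addr,
 *                              uint64_t val, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&mr, OBJECT(dev), &my_dev_ops, dev,
 *                           "my-dev", 0x100);
 */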
2271e9179ce1SAvi Kivity 
2272ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
2273ac1970fbSAvi Kivity {
227489ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
227553cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
227653cb28cbSMarcel Apfelbaum     uint16_t n;
227753cb28cbSMarcel Apfelbaum 
2278a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
227953cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
2280a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
228153cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
2282a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
228353cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
2284a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
228553cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
228600752703SPaolo Bonzini 
22879736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
228800752703SPaolo Bonzini     d->as = as;
228900752703SPaolo Bonzini     as->next_dispatch = d;
229000752703SPaolo Bonzini }
229100752703SPaolo Bonzini 
229279e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d)
229379e2b9aeSPaolo Bonzini {
229479e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
229579e2b9aeSPaolo Bonzini     g_free(d);
229679e2b9aeSPaolo Bonzini }
229779e2b9aeSPaolo Bonzini 
229800752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
229900752703SPaolo Bonzini {
230000752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
23010475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
23020475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
2303ac1970fbSAvi Kivity 
230453cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
2305b35ba30fSMichael S. Tsirkin 
230679e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, next);
230753cb28cbSMarcel Apfelbaum     if (cur) {
230879e2b9aeSPaolo Bonzini         call_rcu(cur, address_space_dispatch_free, rcu);
2309ac1970fbSAvi Kivity     }
23109affd6fcSPaolo Bonzini }
23119affd6fcSPaolo Bonzini 
23121d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
231350c1e149SAvi Kivity {
231432857f4dSPeter Maydell     CPUAddressSpace *cpuas;
231532857f4dSPeter Maydell     AddressSpaceDispatch *d;
2316117712c3SAvi Kivity 
2317117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
2318117712c3SAvi Kivity        reset the modified entries */
231932857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
232032857f4dSPeter Maydell     cpu_reloading_memory_map();
232132857f4dSPeter Maydell     /* The CPU and TLB are protected by the iothread lock.
232232857f4dSPeter Maydell      * We reload the dispatch pointer now because cpu_reloading_memory_map()
232332857f4dSPeter Maydell      * may have split the RCU critical section.
232432857f4dSPeter Maydell      */
232532857f4dSPeter Maydell     d = atomic_rcu_read(&cpuas->as->dispatch);
232632857f4dSPeter Maydell     cpuas->memory_dispatch = d;
232732857f4dSPeter Maydell     tlb_flush(cpuas->cpu, 1);
232850c1e149SAvi Kivity }
232950c1e149SAvi Kivity 
2330ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
2331ac1970fbSAvi Kivity {
233200752703SPaolo Bonzini     as->dispatch = NULL;
233389ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
2334ac1970fbSAvi Kivity         .begin = mem_begin,
233500752703SPaolo Bonzini         .commit = mem_commit,
2336ac1970fbSAvi Kivity         .region_add = mem_add,
2337ac1970fbSAvi Kivity         .region_nop = mem_add,
2338ac1970fbSAvi Kivity         .priority = 0,
2339ac1970fbSAvi Kivity     };
234089ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
2341ac1970fbSAvi Kivity }
2342ac1970fbSAvi Kivity 
23436e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
23446e48e8f9SPaolo Bonzini {
23456e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
23466e48e8f9SPaolo Bonzini }
23476e48e8f9SPaolo Bonzini 
234883f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
234983f3c251SAvi Kivity {
235083f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
235183f3c251SAvi Kivity 
235279e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
235379e2b9aeSPaolo Bonzini     if (d) {
235479e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
235579e2b9aeSPaolo Bonzini     }
235683f3c251SAvi Kivity }
235783f3c251SAvi Kivity 
235862152b8aSAvi Kivity static void memory_map_init(void)
235962152b8aSAvi Kivity {
23607267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
236103f49957SPaolo Bonzini 
236257271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
23637dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2364309cb471SAvi Kivity 
23657267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
23663bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
23673bb28b72SJan Kiszka                           65536);
23687dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
23692641689aSliguang }
237062152b8aSAvi Kivity 
237162152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
237262152b8aSAvi Kivity {
237362152b8aSAvi Kivity     return system_memory;
237462152b8aSAvi Kivity }
237562152b8aSAvi Kivity 
2376309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2377309cb471SAvi Kivity {
2378309cb471SAvi Kivity     return system_io;
2379309cb471SAvi Kivity }
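
/* Usage sketch (board code; size and name are assumptions): RAM becomes
 * visible to the CPUs by adding it as a subregion of the container
 * returned by get_system_memory():
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */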
2380309cb471SAvi Kivity 
2381e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2382e2eef170Spbrook 
238313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
238413eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2385f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2386a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
238713eb76e0Sbellard {
238813eb76e0Sbellard     int l, flags;
238913eb76e0Sbellard     target_ulong page;
239053a5960aSpbrook     void * p;
239113eb76e0Sbellard 
239213eb76e0Sbellard     while (len > 0) {
239313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
239413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
239513eb76e0Sbellard         if (l > len)
239613eb76e0Sbellard             l = len;
239713eb76e0Sbellard         flags = page_get_flags(page);
239813eb76e0Sbellard         if (!(flags & PAGE_VALID))
2399a68fe89cSPaul Brook             return -1;
240013eb76e0Sbellard         if (is_write) {
240113eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2402a68fe89cSPaul Brook                 return -1;
2403579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
240472fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2405a68fe89cSPaul Brook                 return -1;
240672fb7daaSaurel32             memcpy(p, buf, l);
240772fb7daaSaurel32             unlock_user(p, addr, l);
240813eb76e0Sbellard         } else {
240913eb76e0Sbellard             if (!(flags & PAGE_READ))
2410a68fe89cSPaul Brook                 return -1;
2411579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
241272fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2413a68fe89cSPaul Brook                 return -1;
241472fb7daaSaurel32             memcpy(buf, p, l);
24155b257578Saurel32             unlock_user(p, addr, 0);
241613eb76e0Sbellard         }
241713eb76e0Sbellard         len -= l;
241813eb76e0Sbellard         buf += l;
241913eb76e0Sbellard         addr += l;
242013eb76e0Sbellard     }
2421a68fe89cSPaul Brook     return 0;
242213eb76e0Sbellard }
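
/* Usage sketch: this is the routine the gdbstub uses to peek and poke
 * guest memory.  The address below is illustrative:
 *
 *     uint8_t byte;
 *     if (cpu_memory_rw_debug(cpu, 0x1000, &byte, 1, 0) < 0) {
 *         ... page not mapped or not accessible ...
 *     }
 */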
24238df1cd07Sbellard 
242413eb76e0Sbellard #else
242551d7a9ebSAnthony PERARD 
2426845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2427a8170e5eSAvi Kivity                                      hwaddr length)
242851d7a9ebSAnthony PERARD {
2429845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
24300878d0e1SPaolo Bonzini     addr += memory_region_get_ram_addr(mr);
24310878d0e1SPaolo Bonzini 
2432e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2433e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2434e87f7778SPaolo Bonzini      * xen_modified_memory.
2435e87f7778SPaolo Bonzini      */
2436e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2437e87f7778SPaolo Bonzini         dirty_log_mask =
2438e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2439e87f7778SPaolo Bonzini     }
2440845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
244135865339SPaolo Bonzini         tb_invalidate_phys_range(addr, addr + length);
2442845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2443845b6214SPaolo Bonzini     }
244458d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
244549dfcec4SPaolo Bonzini }
244651d7a9ebSAnthony PERARD 
244723326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
244882f2563fSPaolo Bonzini {
2449e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
245023326164SRichard Henderson 
245123326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
245223326164SRichard Henderson        otherwise specified.  */
245323326164SRichard Henderson     if (access_size_max == 0) {
245423326164SRichard Henderson         access_size_max = 4;
245582f2563fSPaolo Bonzini     }
245623326164SRichard Henderson 
245723326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
245823326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
245923326164SRichard Henderson         unsigned align_size_max = addr & -addr;
246023326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
246123326164SRichard Henderson             access_size_max = align_size_max;
246223326164SRichard Henderson         }
246323326164SRichard Henderson     }
246423326164SRichard Henderson 
246523326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
246623326164SRichard Henderson     if (l > access_size_max) {
246723326164SRichard Henderson         l = access_size_max;
246823326164SRichard Henderson     }
24696554f5c0SPeter Maydell     l = pow2floor(l);
247023326164SRichard Henderson 
247123326164SRichard Henderson     return l;
247282f2563fSPaolo Bonzini }
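
/* Worked example (hypothetical region): for an 8-byte access at
 * addr = 0x1006 to a region with valid.max_access_size = 4 and no
 * unaligned support:
 *
 *     access_size_max = 4
 *     align_size_max  = 0x1006 & -0x1006 = 2   // lowest set bit
 *     => access_size_max = 2, l = pow2floor(2) = 2
 *
 * so the caller splits the 8-byte request into smaller accesses.
 */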
247382f2563fSPaolo Bonzini 
24744840f10eSJan Kiszka static bool prepare_mmio_access(MemoryRegion *mr)
2475125b3806SPaolo Bonzini {
24764840f10eSJan Kiszka     bool unlocked = !qemu_mutex_iothread_locked();
24774840f10eSJan Kiszka     bool release_lock = false;
24784840f10eSJan Kiszka 
24794840f10eSJan Kiszka     if (unlocked && mr->global_locking) {
24804840f10eSJan Kiszka         qemu_mutex_lock_iothread();
24814840f10eSJan Kiszka         unlocked = false;
24824840f10eSJan Kiszka         release_lock = true;
2483125b3806SPaolo Bonzini     }
24844840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
24854840f10eSJan Kiszka         if (unlocked) {
24864840f10eSJan Kiszka             qemu_mutex_lock_iothread();
24874840f10eSJan Kiszka         }
24884840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
24894840f10eSJan Kiszka         if (unlocked) {
24904840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
24914840f10eSJan Kiszka         }
24924840f10eSJan Kiszka     }
24934840f10eSJan Kiszka 
24944840f10eSJan Kiszka     return release_lock;
2495125b3806SPaolo Bonzini }
2496125b3806SPaolo Bonzini 
2497a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2498a203ac70SPaolo Bonzini static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2499a203ac70SPaolo Bonzini                                                 MemTxAttrs attrs,
2500a203ac70SPaolo Bonzini                                                 const uint8_t *buf,
2501a203ac70SPaolo Bonzini                                                 int len, hwaddr addr1,
2502a203ac70SPaolo Bonzini                                                 hwaddr l, MemoryRegion *mr)
250313eb76e0Sbellard {
250413eb76e0Sbellard     uint8_t *ptr;
2505791af8c8SPaolo Bonzini     uint64_t val;
25063b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
25074840f10eSJan Kiszka     bool release_lock = false;
250813eb76e0Sbellard 
2509a203ac70SPaolo Bonzini     for (;;) {
2510eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, true)) {
25114840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
25125c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
25134917cf44SAndreas Färber             /* XXX: could force current_cpu to NULL to avoid
25146a00d601Sbellard                potential bugs */
251523326164SRichard Henderson             switch (l) {
251623326164SRichard Henderson             case 8:
251723326164SRichard Henderson                 /* 64 bit write access */
251823326164SRichard Henderson                 val = ldq_p(buf);
25193b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 8,
25203b643495SPeter Maydell                                                        attrs);
252123326164SRichard Henderson                 break;
252223326164SRichard Henderson             case 4:
25231c213d19Sbellard                 /* 32 bit write access */
2524c27004ecSbellard                 val = ldl_p(buf);
25253b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 4,
25263b643495SPeter Maydell                                                        attrs);
252723326164SRichard Henderson                 break;
252823326164SRichard Henderson             case 2:
25291c213d19Sbellard                 /* 16 bit write access */
2530c27004ecSbellard                 val = lduw_p(buf);
25313b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 2,
25323b643495SPeter Maydell                                                        attrs);
253323326164SRichard Henderson                 break;
253423326164SRichard Henderson             case 1:
25351c213d19Sbellard                 /* 8 bit write access */
2536c27004ecSbellard                 val = ldub_p(buf);
25373b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 1,
25383b643495SPeter Maydell                                                        attrs);
253923326164SRichard Henderson                 break;
254023326164SRichard Henderson             default:
254123326164SRichard Henderson                 abort();
254213eb76e0Sbellard             }
25432bbfa05dSPaolo Bonzini         } else {
254413eb76e0Sbellard             /* RAM case */
25450878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
254613eb76e0Sbellard             memcpy(ptr, buf, l);
2547845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, l);
25483a7d929eSbellard         }
2549eb7eeb88SPaolo Bonzini 
2550eb7eeb88SPaolo Bonzini         if (release_lock) {
2551eb7eeb88SPaolo Bonzini             qemu_mutex_unlock_iothread();
2552eb7eeb88SPaolo Bonzini             release_lock = false;
2553eb7eeb88SPaolo Bonzini         }
2554eb7eeb88SPaolo Bonzini 
2555eb7eeb88SPaolo Bonzini         len -= l;
2556eb7eeb88SPaolo Bonzini         buf += l;
2557eb7eeb88SPaolo Bonzini         addr += l;
2558a203ac70SPaolo Bonzini 
2559a203ac70SPaolo Bonzini         if (!len) {
2560a203ac70SPaolo Bonzini             break;
2561eb7eeb88SPaolo Bonzini         }
2562a203ac70SPaolo Bonzini 
2563a203ac70SPaolo Bonzini         l = len;
2564a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2565a203ac70SPaolo Bonzini     }
2566eb7eeb88SPaolo Bonzini 
2567eb7eeb88SPaolo Bonzini     return result;
2568eb7eeb88SPaolo Bonzini }
2569eb7eeb88SPaolo Bonzini 
2570a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2571a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2572eb7eeb88SPaolo Bonzini {
2573eb7eeb88SPaolo Bonzini     hwaddr l;
2574eb7eeb88SPaolo Bonzini     hwaddr addr1;
2575eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2576eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2577a203ac70SPaolo Bonzini 
2578a203ac70SPaolo Bonzini     if (len > 0) {
2579a203ac70SPaolo Bonzini         rcu_read_lock();
2580a203ac70SPaolo Bonzini         l = len;
2581a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2582a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2583a203ac70SPaolo Bonzini                                               addr1, l, mr);
2584a203ac70SPaolo Bonzini         rcu_read_unlock();
2585a203ac70SPaolo Bonzini     }
2586a203ac70SPaolo Bonzini 
2587a203ac70SPaolo Bonzini     return result;
2588a203ac70SPaolo Bonzini }
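
/* Usage sketch (illustrative address): a write with default attributes,
 * checking the transaction result:
 *
 *     uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         buf, sizeof(buf));
 *     if (r != MEMTX_OK) {
 *         ... the target signalled a bus error or decode failure ...
 *     }
 */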
2589a203ac70SPaolo Bonzini 
2590a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2591a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2592a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
2593a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
2594a203ac70SPaolo Bonzini                                         MemoryRegion *mr)
2595a203ac70SPaolo Bonzini {
2596a203ac70SPaolo Bonzini     uint8_t *ptr;
2597a203ac70SPaolo Bonzini     uint64_t val;
2598a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2599eb7eeb88SPaolo Bonzini     bool release_lock = false;
2600eb7eeb88SPaolo Bonzini 
2601a203ac70SPaolo Bonzini     for (;;) {
2602eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, false)) {
260313eb76e0Sbellard             /* I/O case */
26044840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
26055c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
260623326164SRichard Henderson             switch (l) {
260723326164SRichard Henderson             case 8:
260823326164SRichard Henderson                 /* 64 bit read access */
26093b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
26103b643495SPeter Maydell                                                       attrs);
261123326164SRichard Henderson                 stq_p(buf, val);
261223326164SRichard Henderson                 break;
261323326164SRichard Henderson             case 4:
261413eb76e0Sbellard                 /* 32 bit read access */
26153b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
26163b643495SPeter Maydell                                                       attrs);
2617c27004ecSbellard                 stl_p(buf, val);
261823326164SRichard Henderson                 break;
261923326164SRichard Henderson             case 2:
262013eb76e0Sbellard                 /* 16 bit read access */
26213b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
26223b643495SPeter Maydell                                                       attrs);
2623c27004ecSbellard                 stw_p(buf, val);
262423326164SRichard Henderson                 break;
262523326164SRichard Henderson             case 1:
26261c213d19Sbellard                 /* 8 bit read access */
26273b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
26283b643495SPeter Maydell                                                       attrs);
2629c27004ecSbellard                 stb_p(buf, val);
263023326164SRichard Henderson                 break;
263123326164SRichard Henderson             default:
263223326164SRichard Henderson                 abort();
263313eb76e0Sbellard             }
263413eb76e0Sbellard         } else {
263513eb76e0Sbellard             /* RAM case */
26360878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2637f3705d53SAvi Kivity             memcpy(buf, ptr, l);
263813eb76e0Sbellard         }
26394840f10eSJan Kiszka 
26404840f10eSJan Kiszka         if (release_lock) {
26414840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
26424840f10eSJan Kiszka             release_lock = false;
26434840f10eSJan Kiszka         }
26444840f10eSJan Kiszka 
264513eb76e0Sbellard         len -= l;
264613eb76e0Sbellard         buf += l;
264713eb76e0Sbellard         addr += l;
2648a203ac70SPaolo Bonzini 
2649a203ac70SPaolo Bonzini         if (!len) {
2650a203ac70SPaolo Bonzini             break;
265113eb76e0Sbellard         }
2652a203ac70SPaolo Bonzini 
2653a203ac70SPaolo Bonzini         l = len;
2654a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2655a203ac70SPaolo Bonzini     }
2656a203ac70SPaolo Bonzini 
2657a203ac70SPaolo Bonzini     return result;
2658a203ac70SPaolo Bonzini }
2659a203ac70SPaolo Bonzini 
26603cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
26613cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2662a203ac70SPaolo Bonzini {
2663a203ac70SPaolo Bonzini     hwaddr l;
2664a203ac70SPaolo Bonzini     hwaddr addr1;
2665a203ac70SPaolo Bonzini     MemoryRegion *mr;
2666a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2667a203ac70SPaolo Bonzini 
2668a203ac70SPaolo Bonzini     if (len > 0) {
2669a203ac70SPaolo Bonzini         rcu_read_lock();
2670a203ac70SPaolo Bonzini         l = len;
2671a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2672a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2673a203ac70SPaolo Bonzini                                              addr1, l, mr);
267441063e1eSPaolo Bonzini         rcu_read_unlock();
2675a203ac70SPaolo Bonzini     }
2676fd8aaa76SPaolo Bonzini 
26773b643495SPeter Maydell     return result;
267813eb76e0Sbellard }
26798df1cd07Sbellard 
2680eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2681eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2682ac1970fbSAvi Kivity {
2683eb7eeb88SPaolo Bonzini     if (is_write) {
2684eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2685eb7eeb88SPaolo Bonzini     } else {
2686eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2687ac1970fbSAvi Kivity     }
2688ac1970fbSAvi Kivity }
2689ac1970fbSAvi Kivity 
2690a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2691ac1970fbSAvi Kivity                             int len, int is_write)
2692ac1970fbSAvi Kivity {
26935c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
26945c9eb028SPeter Maydell                      buf, len, is_write);
2695ac1970fbSAvi Kivity }
2696ac1970fbSAvi Kivity 
2697582b55a9SAlexander Graf enum write_rom_type {
2698582b55a9SAlexander Graf     WRITE_DATA,
2699582b55a9SAlexander Graf     FLUSH_CACHE,
2700582b55a9SAlexander Graf };
2701582b55a9SAlexander Graf 
27022a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2703582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2704d0ecd2aaSbellard {
2705149f54b5SPaolo Bonzini     hwaddr l;
2706d0ecd2aaSbellard     uint8_t *ptr;
2707149f54b5SPaolo Bonzini     hwaddr addr1;
27085c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2709d0ecd2aaSbellard 
271041063e1eSPaolo Bonzini     rcu_read_lock();
2711d0ecd2aaSbellard     while (len > 0) {
2712d0ecd2aaSbellard         l = len;
27132a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2714d0ecd2aaSbellard 
27155c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
27165c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2717b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
2718d0ecd2aaSbellard         } else {
2719d0ecd2aaSbellard             /* ROM/RAM case */
27200878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2721582b55a9SAlexander Graf             switch (type) {
2722582b55a9SAlexander Graf             case WRITE_DATA:
2723d0ecd2aaSbellard                 memcpy(ptr, buf, l);
2724845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
2725582b55a9SAlexander Graf                 break;
2726582b55a9SAlexander Graf             case FLUSH_CACHE:
2727582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2728582b55a9SAlexander Graf                 break;
2729582b55a9SAlexander Graf             }
2730d0ecd2aaSbellard         }
2731d0ecd2aaSbellard         len -= l;
2732d0ecd2aaSbellard         buf += l;
2733d0ecd2aaSbellard         addr += l;
2734d0ecd2aaSbellard     }
273541063e1eSPaolo Bonzini     rcu_read_unlock();
2736d0ecd2aaSbellard }
2737d0ecd2aaSbellard 
2738582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
27392a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2740582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2741582b55a9SAlexander Graf {
27422a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2743582b55a9SAlexander Graf }
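
/* Usage sketch (address and blob are illustrative): ROM loaders call
 * this instead of address_space_write() so that regions which are
 * read-only for the guest still receive the data:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_len);
 */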
2744582b55a9SAlexander Graf 
2745582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2746582b55a9SAlexander Graf {
2747582b55a9SAlexander Graf     /*
2748582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2749582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2750582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2751582b55a9SAlexander Graf      * the host's instruction cache at least.
2752582b55a9SAlexander Graf      */
2753582b55a9SAlexander Graf     if (tcg_enabled()) {
2754582b55a9SAlexander Graf         return;
2755582b55a9SAlexander Graf     }
2756582b55a9SAlexander Graf 
27572a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
27582a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2759582b55a9SAlexander Graf }
2760582b55a9SAlexander Graf 
27616d16c2f8Saliguori typedef struct {
2762d3e71559SPaolo Bonzini     MemoryRegion *mr;
27636d16c2f8Saliguori     void *buffer;
2764a8170e5eSAvi Kivity     hwaddr addr;
2765a8170e5eSAvi Kivity     hwaddr len;
2766c2cba0ffSFam Zheng     bool in_use;
27676d16c2f8Saliguori } BounceBuffer;
27686d16c2f8Saliguori 
27696d16c2f8Saliguori static BounceBuffer bounce;
27706d16c2f8Saliguori 
2771ba223c29Saliguori typedef struct MapClient {
2772e95205e1SFam Zheng     QEMUBH *bh;
277372cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2774ba223c29Saliguori } MapClient;
2775ba223c29Saliguori 
277638e047b5SFam Zheng QemuMutex map_client_list_lock;
277772cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
277872cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2779ba223c29Saliguori 
2780e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2781ba223c29Saliguori {
278272cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
27837267c094SAnthony Liguori     g_free(client);
2784ba223c29Saliguori }
2785ba223c29Saliguori 
278633b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2787ba223c29Saliguori {
2788ba223c29Saliguori     MapClient *client;
2789ba223c29Saliguori 
279072cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
279172cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2792e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2793e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2794ba223c29Saliguori     }
2795ba223c29Saliguori }
2796ba223c29Saliguori 
2797e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2798d0ecd2aaSbellard {
2799d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2800d0ecd2aaSbellard 
280138e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2802e95205e1SFam Zheng     client->bh = bh;
2803d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
280433b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
280533b6c2edSFam Zheng         cpu_notify_map_clients_locked();
280633b6c2edSFam Zheng     }
280738e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2808d0ecd2aaSbellard }
2809d0ecd2aaSbellard 
281038e047b5SFam Zheng void cpu_exec_init_all(void)
281138e047b5SFam Zheng {
281238e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
281338e047b5SFam Zheng     io_mem_init();
2814680a4783SPaolo Bonzini     memory_map_init();
281538e047b5SFam Zheng     qemu_mutex_init(&map_client_list_lock);
281638e047b5SFam Zheng }
281738e047b5SFam Zheng 
2818e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2819d0ecd2aaSbellard {
2820e95205e1SFam Zheng     MapClient *client;
2821d0ecd2aaSbellard 
2822e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2823e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2824e95205e1SFam Zheng         if (client->bh == bh) {
2825e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2826e95205e1SFam Zheng             break;
2827e95205e1SFam Zheng         }
2828e95205e1SFam Zheng     }
2829e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2830d0ecd2aaSbellard }
2831d0ecd2aaSbellard 
2832d0ecd2aaSbellard static void cpu_notify_map_clients(void)
2833d0ecd2aaSbellard {
283438e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
283533b6c2edSFam Zheng     cpu_notify_map_clients_locked();
283638e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
28376d16c2f8Saliguori }
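
/* Usage sketch of the retry protocol (callback and state names are
 * assumptions): when address_space_map() fails because the bounce
 * buffer is busy, a caller can queue a bottom half to be scheduled
 * once it frees up:
 *
 *     static void my_dma_retry(void *opaque)
 *     {
 *         MyDMAState *s = opaque;   // hypothetical per-request state
 *         ... retry address_space_map() and continue the transfer ...
 *     }
 *
 *     void *p = address_space_map(as, addr, &plen, is_write);
 *     if (!p) {
 *         s->bh = qemu_bh_new(my_dma_retry, s);
 *         cpu_register_map_client(s->bh);
 *         return;                   // resumed from the bottom half
 *     }
 */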
28386d16c2f8Saliguori 
283951644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
284051644ab7SPaolo Bonzini {
28415c8a00ceSPaolo Bonzini     MemoryRegion *mr;
284251644ab7SPaolo Bonzini     hwaddr l, xlat;
284351644ab7SPaolo Bonzini 
284441063e1eSPaolo Bonzini     rcu_read_lock();
284551644ab7SPaolo Bonzini     while (len > 0) {
284651644ab7SPaolo Bonzini         l = len;
28475c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
28485c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
28495c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
28505c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
285151644ab7SPaolo Bonzini                 return false;
285251644ab7SPaolo Bonzini             }
285351644ab7SPaolo Bonzini         }
285451644ab7SPaolo Bonzini 
285551644ab7SPaolo Bonzini         len -= l;
285651644ab7SPaolo Bonzini         addr += l;
285751644ab7SPaolo Bonzini     }
285841063e1eSPaolo Bonzini     rcu_read_unlock();
285951644ab7SPaolo Bonzini     return true;
286051644ab7SPaolo Bonzini }
286151644ab7SPaolo Bonzini 
28626d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
28636d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
28646d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
28656d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2866ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2867ba223c29Saliguori  * likely to succeed.
28686d16c2f8Saliguori  */
2869ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2870a8170e5eSAvi Kivity                         hwaddr addr,
2871a8170e5eSAvi Kivity                         hwaddr *plen,
2872ac1970fbSAvi Kivity                         bool is_write)
28736d16c2f8Saliguori {
2874a8170e5eSAvi Kivity     hwaddr len = *plen;
2875e3127ae0SPaolo Bonzini     hwaddr done = 0;
2876e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2877e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2878e81bcda5SPaolo Bonzini     void *ptr;
28796d16c2f8Saliguori 
2880e3127ae0SPaolo Bonzini     if (len == 0) {
2881e3127ae0SPaolo Bonzini         return NULL;
2882e3127ae0SPaolo Bonzini     }
2883e3127ae0SPaolo Bonzini 
28846d16c2f8Saliguori     l = len;
288541063e1eSPaolo Bonzini     rcu_read_lock();
28865c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
288741063e1eSPaolo Bonzini 
28885c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2889c2cba0ffSFam Zheng         if (atomic_xchg(&bounce.in_use, true)) {
289041063e1eSPaolo Bonzini             rcu_read_unlock();
2891e3127ae0SPaolo Bonzini             return NULL;
28926d16c2f8Saliguori         }
2893e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2894e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2895e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
28966d16c2f8Saliguori         bounce.addr = addr;
28976d16c2f8Saliguori         bounce.len = l;
2898d3e71559SPaolo Bonzini 
2899d3e71559SPaolo Bonzini         memory_region_ref(mr);
2900d3e71559SPaolo Bonzini         bounce.mr = mr;
29016d16c2f8Saliguori         if (!is_write) {
29025c9eb028SPeter Maydell             address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
29035c9eb028SPeter Maydell                                bounce.buffer, l);
29046d16c2f8Saliguori         }
290538bee5dcSStefano Stabellini 
290641063e1eSPaolo Bonzini         rcu_read_unlock();
290738bee5dcSStefano Stabellini         *plen = l;
290838bee5dcSStefano Stabellini         return bounce.buffer;
29096d16c2f8Saliguori     }
2910e3127ae0SPaolo Bonzini 
2911e3127ae0SPaolo Bonzini     base = xlat;
2912e3127ae0SPaolo Bonzini 
2913e3127ae0SPaolo Bonzini     for (;;) {
2914e3127ae0SPaolo Bonzini         len -= l;
2915e3127ae0SPaolo Bonzini         addr += l;
2916e3127ae0SPaolo Bonzini         done += l;
2917e3127ae0SPaolo Bonzini         if (len == 0) {
2918e3127ae0SPaolo Bonzini             break;
2919e3127ae0SPaolo Bonzini         }
2920e3127ae0SPaolo Bonzini 
2921e3127ae0SPaolo Bonzini         l = len;
2922e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2923e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2924149f54b5SPaolo Bonzini             break;
2925149f54b5SPaolo Bonzini         }
29268ab934f9SStefano Stabellini     }
29276d16c2f8Saliguori 
2928d3e71559SPaolo Bonzini     memory_region_ref(mr);
2929e3127ae0SPaolo Bonzini     *plen = done;
29300878d0e1SPaolo Bonzini     ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
2931e81bcda5SPaolo Bonzini     rcu_read_unlock();
2932e81bcda5SPaolo Bonzini 
2933e81bcda5SPaolo Bonzini     return ptr;
29346d16c2f8Saliguori }
29356d16c2f8Saliguori 
2936ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
29376d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
29386d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
29396d16c2f8Saliguori  */
2940a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2941a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
29426d16c2f8Saliguori {
29436d16c2f8Saliguori     if (buffer != bounce.buffer) {
2944d3e71559SPaolo Bonzini         MemoryRegion *mr;
29457443b437SPaolo Bonzini         ram_addr_t addr1;
2946d3e71559SPaolo Bonzini 
294707bdaa41SPaolo Bonzini         mr = memory_region_from_host(buffer, &addr1);
29481b5ec234SPaolo Bonzini         assert(mr != NULL);
2949d3e71559SPaolo Bonzini         if (is_write) {
2950845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, access_len);
29516d16c2f8Saliguori         }
2952868bb33fSJan Kiszka         if (xen_enabled()) {
2953e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2954050a0ddfSAnthony PERARD         }
2955d3e71559SPaolo Bonzini         memory_region_unref(mr);
29566d16c2f8Saliguori         return;
29576d16c2f8Saliguori     }
29586d16c2f8Saliguori     if (is_write) {
29595c9eb028SPeter Maydell         address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
29605c9eb028SPeter Maydell                             bounce.buffer, access_len);
29616d16c2f8Saliguori     }
2962f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
29636d16c2f8Saliguori     bounce.buffer = NULL;
2964d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2965c2cba0ffSFam Zheng     atomic_mb_set(&bounce.in_use, false);
2966ba223c29Saliguori     cpu_notify_map_clients();
29676d16c2f8Saliguori }
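
/* Usage sketch (illustrative address/length): the map/unmap pair for a
 * write-only access; note *plen on return may be smaller than requested:
 *
 *     hwaddr plen = 4096;
 *     void *p = address_space_map(as, 0x1000, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */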
2968d0ecd2aaSbellard 
2969a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2970a8170e5eSAvi Kivity                               hwaddr *plen,
2971ac1970fbSAvi Kivity                               int is_write)
2972ac1970fbSAvi Kivity {
2973ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2974ac1970fbSAvi Kivity }
2975ac1970fbSAvi Kivity 
2976a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2977a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2978ac1970fbSAvi Kivity {
2979ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2980ac1970fbSAvi Kivity }
2981ac1970fbSAvi Kivity 
29828df1cd07Sbellard /* warning: addr must be aligned */
298350013115SPeter Maydell static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
298450013115SPeter Maydell                                                   MemTxAttrs attrs,
298550013115SPeter Maydell                                                   MemTxResult *result,
29861e78bcc1SAlexander Graf                                                   enum device_endian endian)
29878df1cd07Sbellard {
29888df1cd07Sbellard     uint8_t *ptr;
2989791af8c8SPaolo Bonzini     uint64_t val;
29905c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2991149f54b5SPaolo Bonzini     hwaddr l = 4;
2992149f54b5SPaolo Bonzini     hwaddr addr1;
299350013115SPeter Maydell     MemTxResult r;
29944840f10eSJan Kiszka     bool release_lock = false;
29958df1cd07Sbellard 
299641063e1eSPaolo Bonzini     rcu_read_lock();
2997fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
29985c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
29994840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3000125b3806SPaolo Bonzini 
30018df1cd07Sbellard         /* I/O case */
300250013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
30031e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
30041e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
30051e78bcc1SAlexander Graf             val = bswap32(val);
30061e78bcc1SAlexander Graf         }
30071e78bcc1SAlexander Graf #else
30081e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
30091e78bcc1SAlexander Graf             val = bswap32(val);
30101e78bcc1SAlexander Graf         }
30111e78bcc1SAlexander Graf #endif
30128df1cd07Sbellard     } else {
30138df1cd07Sbellard         /* RAM case */
30140878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
30151e78bcc1SAlexander Graf         switch (endian) {
30161e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
30171e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
30181e78bcc1SAlexander Graf             break;
30191e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
30201e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
30211e78bcc1SAlexander Graf             break;
30221e78bcc1SAlexander Graf         default:
30238df1cd07Sbellard             val = ldl_p(ptr);
30241e78bcc1SAlexander Graf             break;
30251e78bcc1SAlexander Graf         }
302650013115SPeter Maydell         r = MEMTX_OK;
302750013115SPeter Maydell     }
302850013115SPeter Maydell     if (result) {
302950013115SPeter Maydell         *result = r;
30308df1cd07Sbellard     }
30314840f10eSJan Kiszka     if (release_lock) {
30324840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
30334840f10eSJan Kiszka     }
303441063e1eSPaolo Bonzini     rcu_read_unlock();
30358df1cd07Sbellard     return val;
30368df1cd07Sbellard }
30378df1cd07Sbellard 
303850013115SPeter Maydell uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
303950013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
304050013115SPeter Maydell {
304150013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
304250013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
304350013115SPeter Maydell }
304450013115SPeter Maydell 
304550013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
304650013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
304750013115SPeter Maydell {
304850013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
304950013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
305050013115SPeter Maydell }
305150013115SPeter Maydell 
305250013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
305350013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
305450013115SPeter Maydell {
305550013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
305650013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
305750013115SPeter Maydell }
305850013115SPeter Maydell 
3059fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
30601e78bcc1SAlexander Graf {
306150013115SPeter Maydell     return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30621e78bcc1SAlexander Graf }
30631e78bcc1SAlexander Graf 
3064fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
30651e78bcc1SAlexander Graf {
306650013115SPeter Maydell     return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30671e78bcc1SAlexander Graf }
30681e78bcc1SAlexander Graf 
3069fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
30701e78bcc1SAlexander Graf {
307150013115SPeter Maydell     return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30721e78bcc1SAlexander Graf }
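
/* Usage sketch (illustrative address): reading a little-endian 32-bit
 * value with explicit attributes and error reporting:
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         ... transaction failed ...
 *     }
 */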
30731e78bcc1SAlexander Graf 
307484b7b8e7Sbellard /* warning: addr must be aligned */
307550013115SPeter Maydell static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
307650013115SPeter Maydell                                                   MemTxAttrs attrs,
307750013115SPeter Maydell                                                   MemTxResult *result,
30781e78bcc1SAlexander Graf                                                   enum device_endian endian)
307984b7b8e7Sbellard {
308084b7b8e7Sbellard     uint8_t *ptr;
308184b7b8e7Sbellard     uint64_t val;
30825c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3083149f54b5SPaolo Bonzini     hwaddr l = 8;
3084149f54b5SPaolo Bonzini     hwaddr addr1;
308550013115SPeter Maydell     MemTxResult r;
30864840f10eSJan Kiszka     bool release_lock = false;
308784b7b8e7Sbellard 
308841063e1eSPaolo Bonzini     rcu_read_lock();
30892c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
30915c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
30924840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3093125b3806SPaolo Bonzini 
309484b7b8e7Sbellard         /* I/O case */
309550013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3096968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
3097968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
3098968a5627SPaolo Bonzini             val = bswap64(val);
3099968a5627SPaolo Bonzini         }
3100968a5627SPaolo Bonzini #else
3101968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
3102968a5627SPaolo Bonzini             val = bswap64(val);
3103968a5627SPaolo Bonzini         }
3104968a5627SPaolo Bonzini #endif
310584b7b8e7Sbellard     } else {
310684b7b8e7Sbellard         /* RAM case */
31070878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
31081e78bcc1SAlexander Graf         switch (endian) {
31091e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
31101e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
31111e78bcc1SAlexander Graf             break;
31121e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
31131e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
31141e78bcc1SAlexander Graf             break;
31151e78bcc1SAlexander Graf         default:
311684b7b8e7Sbellard             val = ldq_p(ptr);
31171e78bcc1SAlexander Graf             break;
31181e78bcc1SAlexander Graf         }
311950013115SPeter Maydell         r = MEMTX_OK;
312050013115SPeter Maydell     }
312150013115SPeter Maydell     if (result) {
312250013115SPeter Maydell         *result = r;
312384b7b8e7Sbellard     }
31244840f10eSJan Kiszka     if (release_lock) {
31254840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
31264840f10eSJan Kiszka     }
312741063e1eSPaolo Bonzini     rcu_read_unlock();
312884b7b8e7Sbellard     return val;
312984b7b8e7Sbellard }
313084b7b8e7Sbellard 
313150013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
313250013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
313350013115SPeter Maydell {
313450013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
313550013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
313650013115SPeter Maydell }
313750013115SPeter Maydell 
313850013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
313950013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
314050013115SPeter Maydell {
314150013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
314250013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
314350013115SPeter Maydell }
314450013115SPeter Maydell 
314550013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
314650013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
314750013115SPeter Maydell {
314850013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
314950013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
315050013115SPeter Maydell }
315150013115SPeter Maydell 
31522c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
31531e78bcc1SAlexander Graf {
315450013115SPeter Maydell     return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31551e78bcc1SAlexander Graf }
31561e78bcc1SAlexander Graf 
31572c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
31581e78bcc1SAlexander Graf {
315950013115SPeter Maydell     return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31601e78bcc1SAlexander Graf }
31611e78bcc1SAlexander Graf 
31622c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
31631e78bcc1SAlexander Graf {
316450013115SPeter Maydell     return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31651e78bcc1SAlexander Graf }
31661e78bcc1SAlexander Graf 
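/*
 * Usage sketch: the _le/_be variants return the value in host order no
 * matter how host and target differ, so a device whose descriptors are
 * defined as big-endian can read them portably; "desc_addr" is a
 * hypothetical guest physical address:
 *
 *     uint64_t next = ldq_be_phys(&address_space_memory, desc_addr);
 *
 * Plain ldq_phys() is target-native-endian: as the internal helper above
 * shows, it swaps according to TARGET_WORDS_BIGENDIAN.
 */
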
3167aab33094Sbellard /* XXX: optimize */
316850013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
316950013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
3170aab33094Sbellard {
3171aab33094Sbellard     uint8_t val;
317250013115SPeter Maydell     MemTxResult r;
317350013115SPeter Maydell 
317450013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
317550013115SPeter Maydell     if (result) {
317650013115SPeter Maydell         *result = r;
317750013115SPeter Maydell     }
3178aab33094Sbellard     return val;
3179aab33094Sbellard }
3180aab33094Sbellard 
318150013115SPeter Maydell uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
318250013115SPeter Maydell {
318350013115SPeter Maydell     return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
318450013115SPeter Maydell }
318550013115SPeter Maydell 
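/*
 * Usage sketch: a single byte has no endianness, so there is only one
 * variant; the byte comes back zero-extended in a uint32_t.  "status_addr"
 * is a hypothetical device register address:
 *
 *     uint8_t status = ldub_phys(&address_space_memory, status_addr);
 */
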
3186733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
318750013115SPeter Maydell static inline uint32_t address_space_lduw_internal(AddressSpace *as,
318850013115SPeter Maydell                                                    hwaddr addr,
318950013115SPeter Maydell                                                    MemTxAttrs attrs,
319050013115SPeter Maydell                                                    MemTxResult *result,
31911e78bcc1SAlexander Graf                                                    enum device_endian endian)
3192aab33094Sbellard {
3193733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3194733f0b02SMichael S. Tsirkin     uint64_t val;
31955c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3196149f54b5SPaolo Bonzini     hwaddr l = 2;
3197149f54b5SPaolo Bonzini     hwaddr addr1;
319850013115SPeter Maydell     MemTxResult r;
31994840f10eSJan Kiszka     bool release_lock = false;
3200733f0b02SMichael S. Tsirkin 
320141063e1eSPaolo Bonzini     rcu_read_lock();
320241701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
32045c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
32054840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3206125b3806SPaolo Bonzini 
3207733f0b02SMichael S. Tsirkin         /* I/O case */
320850013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
32091e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
32101e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
32111e78bcc1SAlexander Graf             val = bswap16(val);
32121e78bcc1SAlexander Graf         }
32131e78bcc1SAlexander Graf #else
32141e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
32151e78bcc1SAlexander Graf             val = bswap16(val);
32161e78bcc1SAlexander Graf         }
32171e78bcc1SAlexander Graf #endif
3218733f0b02SMichael S. Tsirkin     } else {
3219733f0b02SMichael S. Tsirkin         /* RAM case */
32200878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
32211e78bcc1SAlexander Graf         switch (endian) {
32221e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
32231e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
32241e78bcc1SAlexander Graf             break;
32251e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
32261e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
32271e78bcc1SAlexander Graf             break;
32281e78bcc1SAlexander Graf         default:
3229733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
32301e78bcc1SAlexander Graf             break;
32311e78bcc1SAlexander Graf         }
323250013115SPeter Maydell         r = MEMTX_OK;
323350013115SPeter Maydell     }
323450013115SPeter Maydell     if (result) {
323550013115SPeter Maydell         *result = r;
3236733f0b02SMichael S. Tsirkin     }
32374840f10eSJan Kiszka     if (release_lock) {
32384840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
32394840f10eSJan Kiszka     }
324041063e1eSPaolo Bonzini     rcu_read_unlock();
3241733f0b02SMichael S. Tsirkin     return val;
3242aab33094Sbellard }
3243aab33094Sbellard 
324450013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
324550013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
324650013115SPeter Maydell {
324750013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
324850013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
324950013115SPeter Maydell }
325050013115SPeter Maydell 
325150013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
325250013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
325350013115SPeter Maydell {
325450013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
325550013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
325650013115SPeter Maydell }
325750013115SPeter Maydell 
325850013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
325950013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
326050013115SPeter Maydell {
326150013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
326250013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
326350013115SPeter Maydell }
326450013115SPeter Maydell 
326541701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
32661e78bcc1SAlexander Graf {
326750013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32681e78bcc1SAlexander Graf }
32691e78bcc1SAlexander Graf 
327041701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
32711e78bcc1SAlexander Graf {
327250013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32731e78bcc1SAlexander Graf }
32741e78bcc1SAlexander Graf 
327541701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
32761e78bcc1SAlexander Graf {
327750013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32781e78bcc1SAlexander Graf }
32791e78bcc1SAlexander Graf 
32808df1cd07Sbellard /* warning: addr must be aligned. The RAM page is not marked as dirty
32818df1cd07Sbellard    and the code inside is not invalidated. This is useful when the dirty
32828df1cd07Sbellard    bits are used to track modified PTEs */
328350013115SPeter Maydell void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
328450013115SPeter Maydell                                 MemTxAttrs attrs, MemTxResult *result)
32858df1cd07Sbellard {
32868df1cd07Sbellard     uint8_t *ptr;
32875c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3288149f54b5SPaolo Bonzini     hwaddr l = 4;
3289149f54b5SPaolo Bonzini     hwaddr addr1;
329050013115SPeter Maydell     MemTxResult r;
3291845b6214SPaolo Bonzini     uint8_t dirty_log_mask;
32924840f10eSJan Kiszka     bool release_lock = false;
32938df1cd07Sbellard 
329441063e1eSPaolo Bonzini     rcu_read_lock();
32952198a121SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
32975c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
32984840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3299125b3806SPaolo Bonzini 
330050013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33018df1cd07Sbellard     } else {
33020878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
33038df1cd07Sbellard         stl_p(ptr, val);
330474576198Saliguori 
3305845b6214SPaolo Bonzini         dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3306845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
33070878d0e1SPaolo Bonzini         cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
33080878d0e1SPaolo Bonzini                                             4, dirty_log_mask);
330950013115SPeter Maydell         r = MEMTX_OK;
331050013115SPeter Maydell     }
331150013115SPeter Maydell     if (result) {
331250013115SPeter Maydell         *result = r;
33138df1cd07Sbellard     }
33144840f10eSJan Kiszka     if (release_lock) {
33154840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
33164840f10eSJan Kiszka     }
331741063e1eSPaolo Bonzini     rcu_read_unlock();
33188df1cd07Sbellard }
33198df1cd07Sbellard 
332050013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
332150013115SPeter Maydell {
332250013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
332350013115SPeter Maydell }
332450013115SPeter Maydell 
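/*
 * Usage sketch: a software-MMU page-table walker that sets accessed/dirty
 * bits in a guest PTE can use this form; the store still shows up in the
 * migration/VGA dirty bitmaps, but DIRTY_MEMORY_CODE is masked out so
 * translated blocks for that page are not invalidated.  "as", "pte_addr",
 * "pte" and PTE_DIRTY are hypothetical names from such a walker:
 *
 *     pte |= PTE_DIRTY;
 *     stl_phys_notdirty(as, pte_addr, pte);
 */
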
33258df1cd07Sbellard /* warning: addr must be aligned */
332650013115SPeter Maydell static inline void address_space_stl_internal(AddressSpace *as,
3327ab1da857SEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
332850013115SPeter Maydell                                               MemTxAttrs attrs,
332950013115SPeter Maydell                                               MemTxResult *result,
33301e78bcc1SAlexander Graf                                               enum device_endian endian)
33318df1cd07Sbellard {
33328df1cd07Sbellard     uint8_t *ptr;
33335c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3334149f54b5SPaolo Bonzini     hwaddr l = 4;
3335149f54b5SPaolo Bonzini     hwaddr addr1;
333650013115SPeter Maydell     MemTxResult r;
33374840f10eSJan Kiszka     bool release_lock = false;
33388df1cd07Sbellard 
333941063e1eSPaolo Bonzini     rcu_read_lock();
3340ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
33425c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
33434840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3344125b3806SPaolo Bonzini 
33451e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
33461e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
33471e78bcc1SAlexander Graf             val = bswap32(val);
33481e78bcc1SAlexander Graf         }
33491e78bcc1SAlexander Graf #else
33501e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
33511e78bcc1SAlexander Graf             val = bswap32(val);
33521e78bcc1SAlexander Graf         }
33531e78bcc1SAlexander Graf #endif
335450013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33558df1cd07Sbellard     } else {
33568df1cd07Sbellard         /* RAM case */
33570878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
33581e78bcc1SAlexander Graf         switch (endian) {
33591e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
33601e78bcc1SAlexander Graf             stl_le_p(ptr, val);
33611e78bcc1SAlexander Graf             break;
33621e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
33631e78bcc1SAlexander Graf             stl_be_p(ptr, val);
33641e78bcc1SAlexander Graf             break;
33651e78bcc1SAlexander Graf         default:
33668df1cd07Sbellard             stl_p(ptr, val);
33671e78bcc1SAlexander Graf             break;
33681e78bcc1SAlexander Graf         }
3369845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 4);
337050013115SPeter Maydell         r = MEMTX_OK;
33718df1cd07Sbellard     }
337250013115SPeter Maydell     if (result) {
337350013115SPeter Maydell         *result = r;
337450013115SPeter Maydell     }
33754840f10eSJan Kiszka     if (release_lock) {
33764840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
33774840f10eSJan Kiszka     }
337841063e1eSPaolo Bonzini     rcu_read_unlock();
337950013115SPeter Maydell }
338050013115SPeter Maydell 
338150013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
338250013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
338350013115SPeter Maydell {
338450013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
338550013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
338650013115SPeter Maydell }
338750013115SPeter Maydell 
338850013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
338950013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
339050013115SPeter Maydell {
339150013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
339250013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
339350013115SPeter Maydell }
339450013115SPeter Maydell 
339550013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
339650013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
339750013115SPeter Maydell {
339850013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
339950013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
34003a7d929eSbellard }
34018df1cd07Sbellard 
3402ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34031e78bcc1SAlexander Graf {
340450013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34051e78bcc1SAlexander Graf }
34061e78bcc1SAlexander Graf 
3407ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34081e78bcc1SAlexander Graf {
340950013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34101e78bcc1SAlexander Graf }
34111e78bcc1SAlexander Graf 
3412ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34131e78bcc1SAlexander Graf {
341450013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34151e78bcc1SAlexander Graf }
34161e78bcc1SAlexander Graf 
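/*
 * Usage sketch: a store with an explicit transaction check; the _le/_be
 * forms swap as needed so the value lands in guest memory in the named
 * byte order.  Address and value are illustrative:
 *
 *     MemTxResult res;
 *     address_space_stl_le(&address_space_memory, 0x2000, 0xdeadbeef,
 *                          MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         qemu_log("32-bit store at 0x2000 failed\n");
 *     }
 */
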
3417aab33094Sbellard /* XXX: optimize */
341850013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
341950013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3420aab33094Sbellard {
3421aab33094Sbellard     uint8_t v = val;
342250013115SPeter Maydell     MemTxResult r;
342350013115SPeter Maydell 
342450013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
342550013115SPeter Maydell     if (result) {
342650013115SPeter Maydell         *result = r;
342750013115SPeter Maydell     }
342850013115SPeter Maydell }
342950013115SPeter Maydell 
343050013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
343150013115SPeter Maydell {
343250013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3433aab33094Sbellard }
3434aab33094Sbellard 
3435733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
343650013115SPeter Maydell static inline void address_space_stw_internal(AddressSpace *as,
34375ce5944dSEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
343850013115SPeter Maydell                                               MemTxAttrs attrs,
343950013115SPeter Maydell                                               MemTxResult *result,
34401e78bcc1SAlexander Graf                                               enum device_endian endian)
3441aab33094Sbellard {
3442733f0b02SMichael S. Tsirkin     uint8_t *ptr;
34435c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3444149f54b5SPaolo Bonzini     hwaddr l = 2;
3445149f54b5SPaolo Bonzini     hwaddr addr1;
344650013115SPeter Maydell     MemTxResult r;
34474840f10eSJan Kiszka     bool release_lock = false;
3448733f0b02SMichael S. Tsirkin 
344941063e1eSPaolo Bonzini     rcu_read_lock();
34505ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
34515c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
34524840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3453125b3806SPaolo Bonzini 
34541e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
34551e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
34561e78bcc1SAlexander Graf             val = bswap16(val);
34571e78bcc1SAlexander Graf         }
34581e78bcc1SAlexander Graf #else
34591e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
34601e78bcc1SAlexander Graf             val = bswap16(val);
34611e78bcc1SAlexander Graf         }
34621e78bcc1SAlexander Graf #endif
346350013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3464733f0b02SMichael S. Tsirkin     } else {
3465733f0b02SMichael S. Tsirkin         /* RAM case */
34660878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
34671e78bcc1SAlexander Graf         switch (endian) {
34681e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
34691e78bcc1SAlexander Graf             stw_le_p(ptr, val);
34701e78bcc1SAlexander Graf             break;
34711e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
34721e78bcc1SAlexander Graf             stw_be_p(ptr, val);
34731e78bcc1SAlexander Graf             break;
34741e78bcc1SAlexander Graf         default:
3475733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
34761e78bcc1SAlexander Graf             break;
34771e78bcc1SAlexander Graf         }
3478845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 2);
347950013115SPeter Maydell         r = MEMTX_OK;
3480733f0b02SMichael S. Tsirkin     }
348150013115SPeter Maydell     if (result) {
348250013115SPeter Maydell         *result = r;
348350013115SPeter Maydell     }
34844840f10eSJan Kiszka     if (release_lock) {
34854840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
34864840f10eSJan Kiszka     }
348741063e1eSPaolo Bonzini     rcu_read_unlock();
348850013115SPeter Maydell }
348950013115SPeter Maydell 
349050013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
349150013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
349250013115SPeter Maydell {
349350013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
349450013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
349550013115SPeter Maydell }
349650013115SPeter Maydell 
349750013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
349850013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
349950013115SPeter Maydell {
350050013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
350150013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
350250013115SPeter Maydell }
350350013115SPeter Maydell 
350450013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
350550013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
350650013115SPeter Maydell {
350750013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
350850013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3509aab33094Sbellard }
3510aab33094Sbellard 
35115ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35121e78bcc1SAlexander Graf {
351350013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35141e78bcc1SAlexander Graf }
35151e78bcc1SAlexander Graf 
35165ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35171e78bcc1SAlexander Graf {
351850013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35191e78bcc1SAlexander Graf }
35201e78bcc1SAlexander Graf 
35215ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35221e78bcc1SAlexander Graf {
352350013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35241e78bcc1SAlexander Graf }
35251e78bcc1SAlexander Graf 
3526aab33094Sbellard /* XXX: optimize */
352750013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
352850013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
352950013115SPeter Maydell {
353050013115SPeter Maydell     MemTxResult r;
353150013115SPeter Maydell     val = tswap64(val);
353250013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
353350013115SPeter Maydell     if (result) {
353450013115SPeter Maydell         *result = r;
353550013115SPeter Maydell     }
353650013115SPeter Maydell }
353750013115SPeter Maydell 
353850013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
353950013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
354050013115SPeter Maydell {
354150013115SPeter Maydell     MemTxResult r;
354250013115SPeter Maydell     val = cpu_to_le64(val);
354350013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
354450013115SPeter Maydell     if (result) {
354550013115SPeter Maydell         *result = r;
354650013115SPeter Maydell     }
354750013115SPeter Maydell }

354850013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
354950013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
355050013115SPeter Maydell {
355150013115SPeter Maydell     MemTxResult r;
355250013115SPeter Maydell     val = cpu_to_be64(val);
355350013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
355450013115SPeter Maydell     if (result) {
355550013115SPeter Maydell         *result = r;
355650013115SPeter Maydell     }
355750013115SPeter Maydell }
355850013115SPeter Maydell 
3559f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3560aab33094Sbellard {
356150013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3562aab33094Sbellard }
3563aab33094Sbellard 
3564f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35651e78bcc1SAlexander Graf {
356650013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35671e78bcc1SAlexander Graf }
35681e78bcc1SAlexander Graf 
3569f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35701e78bcc1SAlexander Graf {
357150013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35721e78bcc1SAlexander Graf }
35731e78bcc1SAlexander Graf 
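/*
 * Usage sketch: the 64-bit stores above swap the value into the requested
 * byte order first (tswap64/cpu_to_le64/cpu_to_be64) and then push the
 * eight bytes through address_space_rw().  "ring_addr" and "cookie" are
 * hypothetical:
 *
 *     stq_le_phys(&address_space_memory, ring_addr, cookie);
 */
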
35745e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3575f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3576b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
357713eb76e0Sbellard {
357813eb76e0Sbellard     int l;
3579a8170e5eSAvi Kivity     hwaddr phys_addr;
35809b3c35e0Sj_mayer     target_ulong page;
358113eb76e0Sbellard 
358213eb76e0Sbellard     while (len > 0) {
35835232e4c7SPeter Maydell         int asidx;
35845232e4c7SPeter Maydell         MemTxAttrs attrs;
35855232e4c7SPeter Maydell 
358613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
35875232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
35885232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
358913eb76e0Sbellard         /* if no physical page mapped, return an error */
359013eb76e0Sbellard         if (phys_addr == -1) {
359113eb76e0Sbellard             return -1;
359113eb76e0Sbellard         }
359213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
359313eb76e0Sbellard         if (l > len) {
359413eb76e0Sbellard             l = len;
359413eb76e0Sbellard         }
35955e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
35962e38847bSEdgar E. Iglesias         if (is_write) {
35975232e4c7SPeter Maydell             cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
35985232e4c7SPeter Maydell                                           phys_addr, buf, l);
35992e38847bSEdgar E. Iglesias         } else {
36005232e4c7SPeter Maydell             address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
36015232e4c7SPeter Maydell                              MEMTXATTRS_UNSPECIFIED,
36025c9eb028SPeter Maydell                              buf, l, 0);
36032e38847bSEdgar E. Iglesias         }
360413eb76e0Sbellard         len -= l;
360513eb76e0Sbellard         buf += l;
360613eb76e0Sbellard         addr += l;
360713eb76e0Sbellard     }
360813eb76e0Sbellard     return 0;
360913eb76e0Sbellard }
3610038629a6SDr. David Alan Gilbert 
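/*
 * Usage sketch: this is the gdbstub-style accessor.  It takes a guest
 * virtual address, translates it page by page with
 * cpu_get_phys_page_attrs_debug(), and on the write path goes through
 * cpu_physical_memory_write_rom() so breakpoints can be planted in ROM.
 * Reading an instruction at a hypothetical stopped-vCPU "pc":
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         ... no physical page mapped at pc ...
 *     }
 */
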
3611038629a6SDr. David Alan Gilbert /*
3612038629a6SDr. David Alan Gilbert  * Allows code that needs to deal with migration bitmaps etc. to still be
3613038629a6SDr. David Alan Gilbert  * built target-independent.
3614038629a6SDr. David Alan Gilbert  */
3615038629a6SDr. David Alan Gilbert size_t qemu_target_page_bits(void)
3616038629a6SDr. David Alan Gilbert {
3617038629a6SDr. David Alan Gilbert     return TARGET_PAGE_BITS;
3618038629a6SDr. David Alan Gilbert }
3619038629a6SDr. David Alan Gilbert 
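/*
 * Usage sketch: target-independent code (e.g. migration) derives the page
 * size at run time instead of compiling against TARGET_PAGE_SIZE:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     uint64_t page_mask = ~(uint64_t)(page_size - 1);
 */
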
3620a68fe89cSPaul Brook #endif
362113eb76e0Sbellard 
36228e4a424bSBlue Swirl /*
36238e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find out if
36248e4a424bSBlue Swirl  * it's running on a big endian machine. Don't do this at home kids!
36258e4a424bSBlue Swirl  */
362698ed8ecfSGreg Kurz bool target_words_bigendian(void);
362798ed8ecfSGreg Kurz bool target_words_bigendian(void)
36288e4a424bSBlue Swirl {
36298e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
36308e4a424bSBlue Swirl     return true;
36318e4a424bSBlue Swirl #else
36328e4a424bSBlue Swirl     return false;
36338e4a424bSBlue Swirl #endif
36348e4a424bSBlue Swirl }
36358e4a424bSBlue Swirl 
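/*
 * Usage sketch: legacy virtio carries data in guest-native byte order, so a
 * host-side device model swaps only when host and target disagree.
 * HOST_WORDS_BIGENDIAN comes from the build configuration; "val" is a
 * hypothetical 16-bit field:
 *
 * #ifdef HOST_WORDS_BIGENDIAN
 *     bool host_be = true;
 * #else
 *     bool host_be = false;
 * #endif
 *     if (target_words_bigendian() != host_be) {
 *         val = bswap16(val);
 *     }
 */
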
363676f35538SWen Congyang #ifndef CONFIG_USER_ONLY
3637a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
363876f35538SWen Congyang {
36395c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3640149f54b5SPaolo Bonzini     hwaddr l = 1;
364141063e1eSPaolo Bonzini     bool res;
364276f35538SWen Congyang 
364341063e1eSPaolo Bonzini     rcu_read_lock();
36445c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3645149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
364676f35538SWen Congyang 
364741063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
364841063e1eSPaolo Bonzini     rcu_read_unlock();
364941063e1eSPaolo Bonzini     return res;
365076f35538SWen Congyang }
3651bd2fa51fSMichael R. Hines 
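/*
 * Usage sketch: introspection or dump code can use this to skip addresses
 * backed by device MMIO, where even a read may have side effects.  "paddr"
 * is a hypothetical guest physical address:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         ... backed by RAM or ROM, safe to copy bytes out ...
 *     }
 */
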
3652e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3653bd2fa51fSMichael R. Hines {
3654bd2fa51fSMichael R. Hines     RAMBlock *block;
3655e3807054SDr. David Alan Gilbert     int ret = 0;
3656bd2fa51fSMichael R. Hines 
36570dc3f44aSMike Day     rcu_read_lock();
36580dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3659e3807054SDr. David Alan Gilbert         ret = func(block->idstr, block->host, block->offset,
3660e3807054SDr. David Alan Gilbert                    block->used_length, opaque);
3661e3807054SDr. David Alan Gilbert         if (ret) {
3662e3807054SDr. David Alan Gilbert             break;
3663e3807054SDr. David Alan Gilbert         }
3664bd2fa51fSMichael R. Hines     }
36650dc3f44aSMike Day     rcu_read_unlock();
3666e3807054SDr. David Alan Gilbert     return ret;
3667bd2fa51fSMichael R. Hines }
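
/*
 * Usage sketch: the callback matches the call above (id string, host
 * pointer, offset, used length, opaque); a nonzero return stops the walk
 * and is handed back to the caller:
 *
 *     static int count_bytes(const char *idstr, void *host,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_bytes, &total);
 */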
3668ec3f8c99SPeter Maydell #endif
3669