xref: /qemu/system/physmem.c (revision c2cd627ddb13f62557aaf66305edb03cc3d9612d)
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
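
/* Illustrative sketch, not used anywhere below: the read side of the
 * locking rule stated above.  The helper name is hypothetical; the caller
 * must already hold rcu_read_lock(), while writers take the ramlist lock.
 * Compare qemu_get_ram_block() later in this file, which layers an
 * mru_block cache on top of the same walk.
 */
static inline RAMBlock *example_lookup_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* Safe only inside an RCU critical section. */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            return block;
        }
    }
    return NULL;
}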

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of P_L2_BITS).
     * 0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
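
/* Worked example: assuming a 4 KiB target page (TARGET_PAGE_BITS = 12;
 * targets vary), the 52 address bits above the page offset are consumed
 * 9 at a time, so
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 6
 * levels of 512-entry tables.
 */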

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
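
/* Note on the recursion above: phys_page_set_level() carves
 * [index, index + nb) into the largest aligned chunks each level allows.
 * For example, a 0x200-page range starting on a 0x200-page boundary is
 * recorded as a single level-1 entry (step = 1 << P_L2_BITS) rather than
 * 512 separate leaf entries.
 */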

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
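
/* Effect of compaction, by example: if interior nodes A and B each have a
 * single valid child along the chain root -> A -> B -> leaf, the recursion
 * above lets the root entry absorb the children's skip counts and point at
 * the leaf directly, so phys_page_find() below walks one entry instead of
 * three.
 */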

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
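
/* Note: section->size is an Int128.  A section spanning the whole
 * 2^64-byte space has size.hi == 1 and size.lo == 0, a length no 64-bit
 * range check could represent, hence the size.hi short-circuit above.
 */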

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
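
/* Usage sketch (hypothetical helper, not part of this file): one way a
 * caller might honour the "Called from RCU critical section" contract of
 * address_space_translate() when probing a single byte for direct access.
 */
static inline bool example_is_direct_write(AddressSpace *as, hwaddr addr)
{
    hwaddr xlat, plen = 1;
    MemoryRegion *mr;
    bool direct;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &plen, true);
    direct = memory_access_is_direct(mr, true);
    rcu_read_unlock();
    return direct;
}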

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
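
/* Usage sketch (hypothetical target code, not part of this file): the
 * protocol the assertions above expect.  num_ases is set first, then each
 * index is initialised; index 0 doubles as the cpu->as convenience alias.
 *
 *     cpu->num_ases = 1;
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 */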

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

static bool cpu_index_auto_assigned;

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        cpu_list_unlock();
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_lock();
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
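
/* Worked example of the wrap-around hazard: on a 64-bit vaddr, a watchpoint
 * at 0xfffffffffffffffc with len 4 ends exactly at the top of the address
 * space.  wp->vaddr + wp->len wraps to 0, but the inclusive end computed
 * above, wp->vaddr + wp->len - 1 = 0xffffffffffffffff, does not, so the
 * overlap test stays correct.
 */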

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
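
/* Worked example of the loop above: if the first page of the range falls at
 * offset DIRTY_MEMORY_BLOCK_SIZE - 2 within a bitmap block and the range is
 * four pages long, the first iteration clears num = 2 bits (the tail of that
 * block) and the second iteration clears the remaining two bits starting at
 * offset 0 of the next block.
 */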

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
10709fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
107133417e70Sbellard 
1072e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
10738da3ff18Spbrook 
1074c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
10755312bd8bSAvi Kivity                              uint16_t section);
1076acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
107754688b1eSAvi Kivity 
1078a2b257d6SIgor Mammedov static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1079a2b257d6SIgor Mammedov                                qemu_anon_ram_alloc;
108091138037SMarkus Armbruster 
108191138037SMarkus Armbruster /*
108291138037SMarkus Armbruster  * Set a custom physical guest memory allocator.
108391138037SMarkus Armbruster  * Accelerators with unusual needs may need this.  Hopefully, we can
108491138037SMarkus Armbruster  * get rid of it eventually.
108591138037SMarkus Armbruster  */
1086a2b257d6SIgor Mammedov void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
108791138037SMarkus Armbruster {
108891138037SMarkus Armbruster     phys_mem_alloc = alloc;
108991138037SMarkus Armbruster }
109091138037SMarkus Armbruster 
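/*
 * Illustrative sketch (compiled out): how an accelerator might install a
 * custom allocator through phys_mem_set_alloc() above.
 * my_accel_ram_alloc() and my_accel_init() are hypothetical names, not
 * real QEMU hooks; the allocator only has to match the
 * void *(*)(size_t, uint64_t *align) signature and may report the
 * alignment it actually used through *align.
 */
#if 0
#include <stdlib.h>

static void *my_accel_ram_alloc(size_t size, uint64_t *align)
{
    size_t alignment = 2 * 1024 * 1024;    /* e.g. huge-page alignment */
    void *ptr = NULL;

    if (posix_memalign(&ptr, alignment, size) != 0) {
        return NULL;                /* ram_block_add() reports the failure */
    }
    if (align) {
        *align = alignment;
    }
    return ptr;
}

static void my_accel_init(void)
{
    /* Must run before any RAM block is allocated. */
    phys_mem_set_alloc(my_accel_ram_alloc);
}
#endif
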
109153cb28cbSMarcel Apfelbaum static uint16_t phys_section_add(PhysPageMap *map,
109253cb28cbSMarcel Apfelbaum                                  MemoryRegionSection *section)
10935312bd8bSAvi Kivity {
109468f3f65bSPaolo Bonzini     /* The physical section number is ORed with a page-aligned
109568f3f65bSPaolo Bonzini      * pointer to produce the iotlb entries.  Thus it should
109668f3f65bSPaolo Bonzini      * never overflow into the page-aligned value.
109768f3f65bSPaolo Bonzini      */
109853cb28cbSMarcel Apfelbaum     assert(map->sections_nb < TARGET_PAGE_SIZE);
109968f3f65bSPaolo Bonzini 
110053cb28cbSMarcel Apfelbaum     if (map->sections_nb == map->sections_nb_alloc) {
110153cb28cbSMarcel Apfelbaum         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
110253cb28cbSMarcel Apfelbaum         map->sections = g_renew(MemoryRegionSection, map->sections,
110353cb28cbSMarcel Apfelbaum                                 map->sections_nb_alloc);
11045312bd8bSAvi Kivity     }
110553cb28cbSMarcel Apfelbaum     map->sections[map->sections_nb] = *section;
1106dfde4e6eSPaolo Bonzini     memory_region_ref(section->mr);
110753cb28cbSMarcel Apfelbaum     return map->sections_nb++;
11085312bd8bSAvi Kivity }
11095312bd8bSAvi Kivity 
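/*
 * Illustrative sketch (compiled out): the amortized-doubling growth used
 * by phys_section_add() above, shown on a plain int array with realloc().
 * Doubling the capacity (with a floor of 16) keeps the total copying cost
 * linear in the number of appends, i.e. amortized O(1) per append.  IntVec
 * and intvec_append() are stand-ins, not QEMU types.
 */
#if 0
#include <stdlib.h>

typedef struct {
    int *data;
    unsigned nb;          /* elements in use, like sections_nb */
    unsigned nb_alloc;    /* allocated capacity, like sections_nb_alloc */
} IntVec;

static int intvec_append(IntVec *v, int value)
{
    if (v->nb == v->nb_alloc) {
        unsigned new_alloc = v->nb_alloc ? v->nb_alloc * 2 : 16;
        int *p = realloc(v->data, new_alloc * sizeof(*p));
        if (!p) {
            return -1;
        }
        v->data = p;
        v->nb_alloc = new_alloc;
    }
    v->data[v->nb] = value;
    return v->nb++;       /* index of the new element, like the section id */
}
#endif
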
1110058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
1111058bc4b5SPaolo Bonzini {
111255b4e80bSDon Slutz     bool have_sub_page = mr->subpage;
111355b4e80bSDon Slutz 
1114dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
1115dfde4e6eSPaolo Bonzini 
111655b4e80bSDon Slutz     if (have_sub_page) {
1117058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
1118b4fefef9SPeter Crosthwaite         object_unref(OBJECT(&subpage->iomem));
1119058bc4b5SPaolo Bonzini         g_free(subpage);
1120058bc4b5SPaolo Bonzini     }
1121058bc4b5SPaolo Bonzini }
1122058bc4b5SPaolo Bonzini 
11236092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
11245312bd8bSAvi Kivity {
11259affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
11269affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1127058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1128058bc4b5SPaolo Bonzini     }
11299affd6fcSPaolo Bonzini     g_free(map->sections);
11309affd6fcSPaolo Bonzini     g_free(map->nodes);
11315312bd8bSAvi Kivity }
11325312bd8bSAvi Kivity 
1133ac1970fbSAvi Kivity static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
11340f0cb164SAvi Kivity {
11350f0cb164SAvi Kivity     subpage_t *subpage;
1136a8170e5eSAvi Kivity     hwaddr base = section->offset_within_address_space
11370f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
113897115a8dSMichael S. Tsirkin     MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
113953cb28cbSMarcel Apfelbaum                                                    d->map.nodes, d->map.sections);
11400f0cb164SAvi Kivity     MemoryRegionSection subsection = {
11410f0cb164SAvi Kivity         .offset_within_address_space = base,
1142052e87b0SPaolo Bonzini         .size = int128_make64(TARGET_PAGE_SIZE),
11430f0cb164SAvi Kivity     };
1144a8170e5eSAvi Kivity     hwaddr start, end;
11450f0cb164SAvi Kivity 
1146f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
11470f0cb164SAvi Kivity 
1148f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
1149acc9d80bSJan Kiszka         subpage = subpage_init(d->as, base);
11503be91e86SEdgar E. Iglesias         subsection.address_space = d->as;
11510f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
1152ac1970fbSAvi Kivity         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
115353cb28cbSMarcel Apfelbaum                       phys_section_add(&d->map, &subsection));
11540f0cb164SAvi Kivity     } else {
1155f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
11560f0cb164SAvi Kivity     }
11570f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1158052e87b0SPaolo Bonzini     end = start + int128_get64(section->size) - 1;
115953cb28cbSMarcel Apfelbaum     subpage_register(subpage, start, end,
116053cb28cbSMarcel Apfelbaum                      phys_section_add(&d->map, section));
11610f0cb164SAvi Kivity }
11620f0cb164SAvi Kivity 
11630f0cb164SAvi Kivity 
1164052e87b0SPaolo Bonzini static void register_multipage(AddressSpaceDispatch *d,
1165052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
116633417e70Sbellard {
1167a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
116853cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
1169052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1170052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
1171dd81124bSAvi Kivity 
1172733d5ef5SPaolo Bonzini     assert(num_pages);
1173733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
117433417e70Sbellard }
117533417e70Sbellard 
1176ac1970fbSAvi Kivity static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
11770f0cb164SAvi Kivity {
117889ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
117900752703SPaolo Bonzini     AddressSpaceDispatch *d = as->next_dispatch;
118099b9cc06SPaolo Bonzini     MemoryRegionSection now = *section, remain = *section;
1181052e87b0SPaolo Bonzini     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
11820f0cb164SAvi Kivity 
1183733d5ef5SPaolo Bonzini     if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1184733d5ef5SPaolo Bonzini         uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1185733d5ef5SPaolo Bonzini                        - now.offset_within_address_space;
1186733d5ef5SPaolo Bonzini 
1187052e87b0SPaolo Bonzini         now.size = int128_min(int128_make64(left), now.size);
1188ac1970fbSAvi Kivity         register_subpage(d, &now);
1189733d5ef5SPaolo Bonzini     } else {
1190052e87b0SPaolo Bonzini         now.size = int128_zero();
1191733d5ef5SPaolo Bonzini     }
1192052e87b0SPaolo Bonzini     while (int128_ne(remain.size, now.size)) {
1193052e87b0SPaolo Bonzini         remain.size = int128_sub(remain.size, now.size);
1194052e87b0SPaolo Bonzini         remain.offset_within_address_space += int128_get64(now.size);
1195052e87b0SPaolo Bonzini         remain.offset_within_region += int128_get64(now.size);
11960f0cb164SAvi Kivity         now = remain;
1197052e87b0SPaolo Bonzini         if (int128_lt(remain.size, page_size)) {
1198733d5ef5SPaolo Bonzini             register_subpage(d, &now);
119988266249SHu Tao         } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1200052e87b0SPaolo Bonzini             now.size = page_size;
1201ac1970fbSAvi Kivity             register_subpage(d, &now);
120269b67646STyler Hall         } else {
1203052e87b0SPaolo Bonzini             now.size = int128_and(now.size, int128_neg(page_size));
1204ac1970fbSAvi Kivity             register_multipage(d, &now);
120569b67646STyler Hall         }
12060f0cb164SAvi Kivity     }
12070f0cb164SAvi Kivity }
12080f0cb164SAvi Kivity 
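/*
 * Illustrative sketch (compiled out): the page-splitting done by mem_add()
 * above, reduced to plain integers and to the common case.  A region
 * [start, start + size) is cut into an unaligned head (registered as a
 * subpage), a middle run of whole pages (registered as a multipage), and
 * an unaligned tail (subpage again).  PAGE stands in for TARGET_PAGE_SIZE.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PAGE 4096ULL

static void split_region(uint64_t start, uint64_t size)
{
    uint64_t end = start + size;

    if (start & (PAGE - 1)) {                     /* unaligned head */
        uint64_t head = PAGE - (start & (PAGE - 1));
        if (head > size) {
            head = size;
        }
        printf("subpage   [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)(start + head));
        start += head;
    }
    if (end - start >= PAGE) {                    /* whole-page middle */
        uint64_t mid = (end - start) & ~(PAGE - 1);
        printf("multipage [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)(start + mid));
        start += mid;
    }
    if (start < end) {                            /* unaligned tail */
        printf("subpage   [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }
}

/* split_region(0x1800, 0x3000) prints a head, one whole page, and a tail. */
#endif
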
120962a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
121062a2744cSSheng Yang {
121162a2744cSSheng Yang     if (kvm_enabled())
121262a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
121362a2744cSSheng Yang }
121462a2744cSSheng Yang 
1215b2a8658eSUmesh Deshpande void qemu_mutex_lock_ramlist(void)
1216b2a8658eSUmesh Deshpande {
1217b2a8658eSUmesh Deshpande     qemu_mutex_lock(&ram_list.mutex);
1218b2a8658eSUmesh Deshpande }
1219b2a8658eSUmesh Deshpande 
1220b2a8658eSUmesh Deshpande void qemu_mutex_unlock_ramlist(void)
1221b2a8658eSUmesh Deshpande {
1222b2a8658eSUmesh Deshpande     qemu_mutex_unlock(&ram_list.mutex);
1223b2a8658eSUmesh Deshpande }
1224b2a8658eSUmesh Deshpande 
1225e1e84ba0SMarkus Armbruster #ifdef __linux__
122604b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
122704b16653SAlex Williamson                             ram_addr_t memory,
12287f56e740SPaolo Bonzini                             const char *path,
12297f56e740SPaolo Bonzini                             Error **errp)
1230c902760fSMarcelo Tosatti {
1231fd97fd44SMarkus Armbruster     bool unlink_on_error = false;
1232c902760fSMarcelo Tosatti     char *filename;
12338ca761f6SPeter Feiner     char *sanitized_name;
12348ca761f6SPeter Feiner     char *c;
1235056b68afSIgor Mammedov     void *area = MAP_FAILED;
12365c3ece79SPaolo Bonzini     int fd = -1;
1237e1fb6471SMarkus Armbruster     int64_t page_size;
1238c902760fSMarcelo Tosatti 
1239c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
12407f56e740SPaolo Bonzini         error_setg(errp,
12417f56e740SPaolo Bonzini                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1242fd97fd44SMarkus Armbruster         return NULL;
1243c902760fSMarcelo Tosatti     }
1244c902760fSMarcelo Tosatti 
1245fd97fd44SMarkus Armbruster     for (;;) {
1246fd97fd44SMarkus Armbruster         fd = open(path, O_RDWR);
1247fd97fd44SMarkus Armbruster         if (fd >= 0) {
1248fd97fd44SMarkus Armbruster             /* @path names an existing file, use it */
1249fd97fd44SMarkus Armbruster             break;
1250fd97fd44SMarkus Armbruster         }
1251fd97fd44SMarkus Armbruster         if (errno == ENOENT) {
1252fd97fd44SMarkus Armbruster             /* @path names a file that doesn't exist, create it */
1253fd97fd44SMarkus Armbruster             fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1254fd97fd44SMarkus Armbruster             if (fd >= 0) {
1255fd97fd44SMarkus Armbruster                 unlink_on_error = true;
1256fd97fd44SMarkus Armbruster                 break;
1257fd97fd44SMarkus Armbruster             }
1258fd97fd44SMarkus Armbruster         } else if (errno == EISDIR) {
1259fd97fd44SMarkus Armbruster             /* @path names a directory, create a file there */
12608ca761f6SPeter Feiner             /* Make name safe to use with mkstemp by replacing '/' with '_'. */
126183234bf2SPeter Crosthwaite             sanitized_name = g_strdup(memory_region_name(block->mr));
12628ca761f6SPeter Feiner             for (c = sanitized_name; *c != '\0'; c++) {
12638d31d6b6SPavel Fedin                 if (*c == '/') {
12648ca761f6SPeter Feiner                     *c = '_';
12658ca761f6SPeter Feiner                 }
12668d31d6b6SPavel Fedin             }
12678ca761f6SPeter Feiner 
12688ca761f6SPeter Feiner             filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
12698ca761f6SPeter Feiner                                        sanitized_name);
12708ca761f6SPeter Feiner             g_free(sanitized_name);
1271c902760fSMarcelo Tosatti 
1272c902760fSMarcelo Tosatti             fd = mkstemp(filename);
12738d31d6b6SPavel Fedin             if (fd >= 0) {
12748d31d6b6SPavel Fedin                 unlink(filename);
1275fd97fd44SMarkus Armbruster                 g_free(filename);
1276fd97fd44SMarkus Armbruster                 break;
12778d31d6b6SPavel Fedin             }
12788d31d6b6SPavel Fedin             g_free(filename);
1279fd97fd44SMarkus Armbruster         }
1280fd97fd44SMarkus Armbruster         if (errno != EEXIST && errno != EINTR) {
1281fd97fd44SMarkus Armbruster             error_setg_errno(errp, errno,
1282fd97fd44SMarkus Armbruster                              "can't open backing store %s for guest RAM",
1283fd97fd44SMarkus Armbruster                              path);
1284fd97fd44SMarkus Armbruster             goto error;
1285fd97fd44SMarkus Armbruster         }
1286fd97fd44SMarkus Armbruster         /*
1287fd97fd44SMarkus Armbruster          * Try again on EINTR and EEXIST.  The latter happens when
1288fd97fd44SMarkus Armbruster          * something else creates the file between our two open() calls.
1289fd97fd44SMarkus Armbruster          */
12908d31d6b6SPavel Fedin     }
12918d31d6b6SPavel Fedin 
1292e1fb6471SMarkus Armbruster     page_size = qemu_fd_getpagesize(fd);
1293d2f39addSDominik Dingel     block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
1294fd97fd44SMarkus Armbruster 
1295e1fb6471SMarkus Armbruster     if (memory < page_size) {
1296fd97fd44SMarkus Armbruster         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1297fd97fd44SMarkus Armbruster                    "or larger than page size 0x%" PRIx64,
1298e1fb6471SMarkus Armbruster                    memory, page_size);
1299f9a49dfaSMarcelo Tosatti         goto error;
1300c902760fSMarcelo Tosatti     }
1301c902760fSMarcelo Tosatti 
1302e1fb6471SMarkus Armbruster     memory = ROUND_UP(memory, page_size);
1303c902760fSMarcelo Tosatti 
1304c902760fSMarcelo Tosatti     /*
1305c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs on older
1306c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1307c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1308c902760fSMarcelo Tosatti      * mmap will fail.
1309c902760fSMarcelo Tosatti      */
13107f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1311c902760fSMarcelo Tosatti         perror("ftruncate");
13127f56e740SPaolo Bonzini     }
1313c902760fSMarcelo Tosatti 
1314d2f39addSDominik Dingel     area = qemu_ram_mmap(fd, memory, block->mr->align,
1315d2f39addSDominik Dingel                          block->flags & RAM_SHARED);
1316c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
13177f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
1318fd97fd44SMarkus Armbruster                          "unable to map backing store for guest RAM");
1319f9a49dfaSMarcelo Tosatti         goto error;
1320c902760fSMarcelo Tosatti     }
1321ef36fa14SMarcelo Tosatti 
1322ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
1323056b68afSIgor Mammedov         os_mem_prealloc(fd, area, memory, errp);
1324056b68afSIgor Mammedov         if (errp && *errp) {
1325056b68afSIgor Mammedov             goto error;
1326056b68afSIgor Mammedov         }
1327ef36fa14SMarcelo Tosatti     }
1328ef36fa14SMarcelo Tosatti 
132904b16653SAlex Williamson     block->fd = fd;
1330c902760fSMarcelo Tosatti     return area;
1331f9a49dfaSMarcelo Tosatti 
1332f9a49dfaSMarcelo Tosatti error:
1333056b68afSIgor Mammedov     if (area != MAP_FAILED) {
1334056b68afSIgor Mammedov         qemu_ram_munmap(area, memory);
1335056b68afSIgor Mammedov     }
1336fd97fd44SMarkus Armbruster     if (unlink_on_error) {
1337fd97fd44SMarkus Armbruster         unlink(path);
1338fd97fd44SMarkus Armbruster     }
13395c3ece79SPaolo Bonzini     if (fd != -1) {
1340fd97fd44SMarkus Armbruster         close(fd);
13415c3ece79SPaolo Bonzini     }
1342f9a49dfaSMarcelo Tosatti     return NULL;
1343c902760fSMarcelo Tosatti }
1344c902760fSMarcelo Tosatti #endif
1345c902760fSMarcelo Tosatti 
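/*
 * Illustrative sketch (compiled out): the open-or-create retry pattern
 * used by file_ram_alloc() above.  open(O_RDWR) and
 * open(O_CREAT | O_EXCL) can each lose a race with another process that
 * creates or removes the file in between, so the loop retries on EEXIST
 * (and EINTR) until one of the two succeeds.  Standalone POSIX code;
 * open_or_create() is not a QEMU API.
 */
#if 0
#include <errno.h>
#include <fcntl.h>

static int open_or_create(const char *path, int *created)
{
    int fd;

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            *created = 0;          /* existing file */
            return fd;
        }
        if (errno == ENOENT) {
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = 1;      /* we made it; caller may unlink on error */
                return fd;
            }
        }
        if (errno != EEXIST && errno != EINTR) {
            return -1;             /* genuine failure */
        }
        /* Lost the race (EEXIST) or got interrupted (EINTR): try again. */
    }
}
#endif
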
13460dc3f44aSMike Day /* Called with the ramlist lock held.  */
1347d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1348d17b5288SAlex Williamson {
134904b16653SAlex Williamson     RAMBlock *block, *next_block;
13503e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
135104b16653SAlex Williamson 
135249cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out the same offset multiple times */
135349cd9ac6SStefan Hajnoczi 
13540dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
135504b16653SAlex Williamson         return 0;
13560d53d9feSMike Day     }
135704b16653SAlex Williamson 
13580dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1359f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
136004b16653SAlex Williamson 
136162be4e3aSMichael S. Tsirkin         end = block->offset + block->max_length;
136204b16653SAlex Williamson 
13630dc3f44aSMike Day         QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
136404b16653SAlex Williamson             if (next_block->offset >= end) {
136504b16653SAlex Williamson                 next = MIN(next, next_block->offset);
136604b16653SAlex Williamson             }
136704b16653SAlex Williamson         }
136804b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
136904b16653SAlex Williamson             offset = end;
137004b16653SAlex Williamson             mingap = next - end;
137104b16653SAlex Williamson         }
137204b16653SAlex Williamson     }
13733e837b2cSAlex Williamson 
13743e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
13753e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
13763e837b2cSAlex Williamson                 (uint64_t)size);
13773e837b2cSAlex Williamson         abort();
13783e837b2cSAlex Williamson     }
13793e837b2cSAlex Williamson 
138004b16653SAlex Williamson     return offset;
138104b16653SAlex Williamson }
138204b16653SAlex Williamson 
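/*
 * Illustrative sketch (compiled out): the best-fit gap search performed by
 * find_ram_offset() above, on a simplified array of blocks sorted by
 * offset.  Among all gaps large enough for the request it picks the
 * smallest, which keeps fragmentation down.  Note the real ram_list is
 * sorted by block size, not offset, which is why the function above needs
 * an inner loop to find each block's successor.  Types and names here are
 * stand-ins, not QEMU structures.
 */
#if 0
#include <stdint.h>

struct simple_block {
    uint64_t offset;
    uint64_t size;
};

#define INVALID_OFFSET UINT64_MAX   /* stand-in for RAM_ADDR_MAX */

static uint64_t best_fit(const struct simple_block *blocks, int n,
                         uint64_t want)
{
    uint64_t best = INVALID_OFFSET, best_gap = 0;
    uint64_t prev_end = 0;
    int i;

    for (i = 0; i <= n; i++) {
        /* Gap between the end of the previous block and the next one;
         * the final iteration measures the tail of the address space. */
        uint64_t next = (i < n) ? blocks[i].offset : UINT64_MAX;
        uint64_t gap = next - prev_end;

        if (gap >= want && (best == INVALID_OFFSET || gap < best_gap)) {
            best = prev_end;
            best_gap = gap;
        }
        if (i < n) {
            prev_end = blocks[i].offset + blocks[i].size;
        }
    }
    return best;    /* INVALID_OFFSET if nothing fits */
}
#endif
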
1383652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
138404b16653SAlex Williamson {
1385d17b5288SAlex Williamson     RAMBlock *block;
1386d17b5288SAlex Williamson     ram_addr_t last = 0;
1387d17b5288SAlex Williamson 
13880dc3f44aSMike Day     rcu_read_lock();
13890dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
139062be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
13910d53d9feSMike Day     }
13920dc3f44aSMike Day     rcu_read_unlock();
1393d17b5288SAlex Williamson     return last;
1394d17b5288SAlex Williamson }
1395d17b5288SAlex Williamson 
1396ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1397ddb97f1dSJason Baron {
1398ddb97f1dSJason Baron     int ret;
1399ddb97f1dSJason Baron 
1400ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
140147c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1402ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1403ddb97f1dSJason Baron         if (ret) {
1404ddb97f1dSJason Baron             perror("qemu_madvise");
1405ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1406ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1407ddb97f1dSJason Baron         }
1408ddb97f1dSJason Baron     }
1409ddb97f1dSJason Baron }
1410ddb97f1dSJason Baron 
1411422148d3SDr. David Alan Gilbert const char *qemu_ram_get_idstr(RAMBlock *rb)
1412422148d3SDr. David Alan Gilbert {
1413422148d3SDr. David Alan Gilbert     return rb->idstr;
1414422148d3SDr. David Alan Gilbert }
1415422148d3SDr. David Alan Gilbert 
1416ae3a7047SMike Day /* Called with iothread lock held.  */
1417fa53a0e5SGonglei void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
141820cfe881SHu Tao {
1419fa53a0e5SGonglei     RAMBlock *block;
142020cfe881SHu Tao 
1421c5705a77SAvi Kivity     assert(new_block);
1422c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
142384b89d78SCam Macdonell 
142409e5ab63SAnthony Liguori     if (dev) {
142509e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
142684b89d78SCam Macdonell         if (id) {
142784b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
14287267c094SAnthony Liguori             g_free(id);
142984b89d78SCam Macdonell         }
143084b89d78SCam Macdonell     }
143184b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
143284b89d78SCam Macdonell 
1433ab0a9956SGonglei     rcu_read_lock();
14340dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1435fa53a0e5SGonglei         if (block != new_block &&
1436fa53a0e5SGonglei             !strcmp(block->idstr, new_block->idstr)) {
143784b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
143884b89d78SCam Macdonell                     new_block->idstr);
143984b89d78SCam Macdonell             abort();
144084b89d78SCam Macdonell         }
144184b89d78SCam Macdonell     }
14420dc3f44aSMike Day     rcu_read_unlock();
1443c5705a77SAvi Kivity }
1444c5705a77SAvi Kivity 
1445ae3a7047SMike Day /* Called with iothread lock held.  */
1446fa53a0e5SGonglei void qemu_ram_unset_idstr(RAMBlock *block)
144720cfe881SHu Tao {
1448ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1449ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1450ae3a7047SMike Day      * does not work anyway.
1451ae3a7047SMike Day      */
145220cfe881SHu Tao     if (block) {
145320cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
145420cfe881SHu Tao     }
145520cfe881SHu Tao }
145620cfe881SHu Tao 
14578490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
14588490fc78SLuiz Capitulino {
145975cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
14608490fc78SLuiz Capitulino         /* disabled by the user */
14618490fc78SLuiz Capitulino         return 0;
14628490fc78SLuiz Capitulino     }
14638490fc78SLuiz Capitulino 
14648490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
14658490fc78SLuiz Capitulino }
14668490fc78SLuiz Capitulino 
146762be4e3aSMichael S. Tsirkin /* Only legal before guest might have detected the memory size: e.g. on
146862be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
146962be4e3aSMichael S. Tsirkin  *
147062be4e3aSMichael S. Tsirkin  * As memory core doesn't know how is memory accessed, it is up to
147162be4e3aSMichael S. Tsirkin  * resize callback to update device state and/or add assertions to detect
147262be4e3aSMichael S. Tsirkin  * misuse, if necessary.
147362be4e3aSMichael S. Tsirkin  */
1474fa53a0e5SGonglei int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
147562be4e3aSMichael S. Tsirkin {
147662be4e3aSMichael S. Tsirkin     assert(block);
147762be4e3aSMichael S. Tsirkin 
14784ed023ceSDr. David Alan Gilbert     newsize = HOST_PAGE_ALIGN(newsize);
1479129ddaf3SMichael S. Tsirkin 
148062be4e3aSMichael S. Tsirkin     if (block->used_length == newsize) {
148162be4e3aSMichael S. Tsirkin         return 0;
148262be4e3aSMichael S. Tsirkin     }
148362be4e3aSMichael S. Tsirkin 
148462be4e3aSMichael S. Tsirkin     if (!(block->flags & RAM_RESIZEABLE)) {
148562be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
148662be4e3aSMichael S. Tsirkin                          "Length mismatch: %s: 0x" RAM_ADDR_FMT
148762be4e3aSMichael S. Tsirkin                          " in != 0x" RAM_ADDR_FMT, block->idstr,
148862be4e3aSMichael S. Tsirkin                          newsize, block->used_length);
148962be4e3aSMichael S. Tsirkin         return -EINVAL;
149062be4e3aSMichael S. Tsirkin     }
149162be4e3aSMichael S. Tsirkin 
149262be4e3aSMichael S. Tsirkin     if (block->max_length < newsize) {
149362be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
149462be4e3aSMichael S. Tsirkin                          "Length too large: %s: 0x" RAM_ADDR_FMT
149562be4e3aSMichael S. Tsirkin                          " > 0x" RAM_ADDR_FMT, block->idstr,
149662be4e3aSMichael S. Tsirkin                          newsize, block->max_length);
149762be4e3aSMichael S. Tsirkin         return -EINVAL;
149862be4e3aSMichael S. Tsirkin     }
149962be4e3aSMichael S. Tsirkin 
150062be4e3aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
150162be4e3aSMichael S. Tsirkin     block->used_length = newsize;
150258d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
150358d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
150462be4e3aSMichael S. Tsirkin     memory_region_set_size(block->mr, newsize);
150562be4e3aSMichael S. Tsirkin     if (block->resized) {
150662be4e3aSMichael S. Tsirkin         block->resized(block->idstr, newsize, block->host);
150762be4e3aSMichael S. Tsirkin     }
150862be4e3aSMichael S. Tsirkin     return 0;
150962be4e3aSMichael S. Tsirkin }
151062be4e3aSMichael S. Tsirkin 
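/*
 * Illustrative sketch (compiled out): a hypothetical caller of
 * qemu_ram_resize() above.  The block must have been created resizeable
 * (e.g. with qemu_ram_alloc_resizeable() below) and the guest must not
 * yet rely on the old size, per the comment above.  grow_boot_ram() is an
 * example name, not an existing QEMU function.
 */
#if 0
static int grow_boot_ram(RAMBlock *block, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(block, newsize, &err) < 0) {
        error_report_err(err);  /* e.g. not resizeable, or > max_length */
        return -1;
    }
    return 0;
}
#endif
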
15115b82b703SStefan Hajnoczi /* Called with ram_list.mutex held */
15125b82b703SStefan Hajnoczi static void dirty_memory_extend(ram_addr_t old_ram_size,
15135b82b703SStefan Hajnoczi                                 ram_addr_t new_ram_size)
15145b82b703SStefan Hajnoczi {
15155b82b703SStefan Hajnoczi     ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
15165b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
15175b82b703SStefan Hajnoczi     ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
15185b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
15195b82b703SStefan Hajnoczi     int i;
15205b82b703SStefan Hajnoczi 
15215b82b703SStefan Hajnoczi     /* Only need to extend if block count increased */
15225b82b703SStefan Hajnoczi     if (new_num_blocks <= old_num_blocks) {
15235b82b703SStefan Hajnoczi         return;
15245b82b703SStefan Hajnoczi     }
15255b82b703SStefan Hajnoczi 
15265b82b703SStefan Hajnoczi     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
15275b82b703SStefan Hajnoczi         DirtyMemoryBlocks *old_blocks;
15285b82b703SStefan Hajnoczi         DirtyMemoryBlocks *new_blocks;
15295b82b703SStefan Hajnoczi         int j;
15305b82b703SStefan Hajnoczi 
15315b82b703SStefan Hajnoczi         old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
15325b82b703SStefan Hajnoczi         new_blocks = g_malloc(sizeof(*new_blocks) +
15335b82b703SStefan Hajnoczi                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
15345b82b703SStefan Hajnoczi 
15355b82b703SStefan Hajnoczi         if (old_num_blocks) {
15365b82b703SStefan Hajnoczi             memcpy(new_blocks->blocks, old_blocks->blocks,
15375b82b703SStefan Hajnoczi                    old_num_blocks * sizeof(old_blocks->blocks[0]));
15385b82b703SStefan Hajnoczi         }
15395b82b703SStefan Hajnoczi 
15405b82b703SStefan Hajnoczi         for (j = old_num_blocks; j < new_num_blocks; j++) {
15415b82b703SStefan Hajnoczi             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
15425b82b703SStefan Hajnoczi         }
15435b82b703SStefan Hajnoczi 
15445b82b703SStefan Hajnoczi         atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
15455b82b703SStefan Hajnoczi 
15465b82b703SStefan Hajnoczi         if (old_blocks) {
15475b82b703SStefan Hajnoczi             g_free_rcu(old_blocks, rcu);
15485b82b703SStefan Hajnoczi         }
15495b82b703SStefan Hajnoczi     }
15505b82b703SStefan Hajnoczi }
15515b82b703SStefan Hajnoczi 
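/*
 * Illustrative sketch (compiled out): the RCU copy-and-publish pattern
 * used by dirty_memory_extend() above.  Readers load the pointer once and
 * then use that snapshot, so the writer may replace the whole structure:
 * copy the old contents into a larger one, publish the new pointer with a
 * release store, and defer freeing the old one until all pre-existing
 * readers are done (QEMU uses g_free_rcu(); synchronize_rcu() is the
 * generic equivalent).  C11-atomics stand-in, not QEMU's atomic_rcu_* API.
 */
#if 0
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct table {
    size_t len;
    int slots[];            /* flexible array, like blocks[] above */
};

static _Atomic(struct table *) current;

static int extend_table(size_t new_len)
{
    struct table *old = atomic_load_explicit(&current, memory_order_acquire);
    size_t old_len = old ? old->len : 0;
    struct table *new_tab;

    if (new_len <= old_len) {
        return 0;                        /* nothing to do */
    }
    new_tab = calloc(1, sizeof(*new_tab) + new_len * sizeof(new_tab->slots[0]));
    if (!new_tab) {
        return -1;
    }
    new_tab->len = new_len;
    if (old_len) {
        memcpy(new_tab->slots, old->slots, old_len * sizeof(new_tab->slots[0]));
    }
    /* Publish: readers see either the old or the fully built new table. */
    atomic_store_explicit(&current, new_tab, memory_order_release);
    /* synchronize_rcu(); free(old);  -- reclamation must wait for readers */
    return 0;
}
#endif
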
1552528f46afSFam Zheng static void ram_block_add(RAMBlock *new_block, Error **errp)
1553c5705a77SAvi Kivity {
1554e1c57ab8SPaolo Bonzini     RAMBlock *block;
15550d53d9feSMike Day     RAMBlock *last_block = NULL;
15562152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
155737aa7a0eSMarkus Armbruster     Error *err = NULL;
15582152f5caSJuan Quintela 
15592152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1560c5705a77SAvi Kivity 
1561b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
15629b8424d5SMichael S. Tsirkin     new_block->offset = find_ram_offset(new_block->max_length);
1563e1c57ab8SPaolo Bonzini 
15640628c182SMarkus Armbruster     if (!new_block->host) {
1565e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
15669b8424d5SMichael S. Tsirkin             xen_ram_alloc(new_block->offset, new_block->max_length,
156737aa7a0eSMarkus Armbruster                           new_block->mr, &err);
156837aa7a0eSMarkus Armbruster             if (err) {
156937aa7a0eSMarkus Armbruster                 error_propagate(errp, err);
157037aa7a0eSMarkus Armbruster                 qemu_mutex_unlock_ramlist();
157139c350eeSPaolo Bonzini                 return;
157237aa7a0eSMarkus Armbruster             }
1573e1c57ab8SPaolo Bonzini         } else {
15749b8424d5SMichael S. Tsirkin             new_block->host = phys_mem_alloc(new_block->max_length,
1575a2b257d6SIgor Mammedov                                              &new_block->mr->align);
157639228250SMarkus Armbruster             if (!new_block->host) {
1577ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1578ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1579ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1580ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
158139c350eeSPaolo Bonzini                 return;
158239228250SMarkus Armbruster             }
15839b8424d5SMichael S. Tsirkin             memory_try_enable_merging(new_block->host, new_block->max_length);
1584c902760fSMarcelo Tosatti         }
15856977dfe6SYoshiaki Tamura     }
158694a6b54fSpbrook 
1587dd631697SLi Zhijian     new_ram_size = MAX(old_ram_size,
1588dd631697SLi Zhijian               (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1589dd631697SLi Zhijian     if (new_ram_size > old_ram_size) {
1590dd631697SLi Zhijian         migration_bitmap_extend(old_ram_size, new_ram_size);
15915b82b703SStefan Hajnoczi         dirty_memory_extend(old_ram_size, new_ram_size);
1592dd631697SLi Zhijian     }
15930d53d9feSMike Day     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
15940d53d9feSMike Day      * QLIST (which has an RCU-friendly variant) does not have insertion at
15950d53d9feSMike Day      * tail, so save the last element in last_block.
15960d53d9feSMike Day      */
15970dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
15980d53d9feSMike Day         last_block = block;
15999b8424d5SMichael S. Tsirkin         if (block->max_length < new_block->max_length) {
1600abb26d63SPaolo Bonzini             break;
1601abb26d63SPaolo Bonzini         }
1602abb26d63SPaolo Bonzini     }
1603abb26d63SPaolo Bonzini     if (block) {
16040dc3f44aSMike Day         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
16050d53d9feSMike Day     } else if (last_block) {
16060dc3f44aSMike Day         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
16070d53d9feSMike Day     } else { /* list is empty */
16080dc3f44aSMike Day         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1609abb26d63SPaolo Bonzini     }
16100d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
161194a6b54fSpbrook 
16120dc3f44aSMike Day     /* Write list before version */
16130dc3f44aSMike Day     smp_wmb();
1614f798b07fSUmesh Deshpande     ram_list.version++;
1615b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1616f798b07fSUmesh Deshpande 
16179b8424d5SMichael S. Tsirkin     cpu_physical_memory_set_dirty_range(new_block->offset,
161858d2707eSPaolo Bonzini                                         new_block->used_length,
161958d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
162094a6b54fSpbrook 
1621a904c911SPaolo Bonzini     if (new_block->host) {
16229b8424d5SMichael S. Tsirkin         qemu_ram_setup_dump(new_block->host, new_block->max_length);
16239b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1624c2cd627dSCao jin         /* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU */
16259b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1626a904c911SPaolo Bonzini     }
162794a6b54fSpbrook }
1628e9a1ab19Sbellard 
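/*
 * Illustrative sketch (compiled out): the descending-size insertion done
 * in ram_block_add() above, on a plain singly linked list.  Like QLIST,
 * such a list has no tail-insert, so the walk remembers the last node
 * visited in case the new node is the smallest.  Plain pointers stand in
 * for the RCU-safe QLIST_* macros, which additionally order the stores so
 * readers never observe a half-linked node.
 */
#if 0
#include <stddef.h>

struct node {
    size_t size;
    struct node *next;
};

static void insert_by_size(struct node **head, struct node *n)
{
    struct node *cur, *last = NULL;

    for (cur = *head; cur; cur = cur->next) {
        if (cur->size < n->size) {
            break;                 /* insert before the first smaller node */
        }
        last = cur;
    }
    if (last) {                    /* after last, i.e. before cur (or tail) */
        n->next = last->next;
        last->next = n;
    } else {                       /* new head: biggest node, or empty list */
        n->next = *head;
        *head = n;
    }
}
#endif
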
16290b183fc8SPaolo Bonzini #ifdef __linux__
1630528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1631dbcb8981SPaolo Bonzini                                    bool share, const char *mem_path,
16327f56e740SPaolo Bonzini                                    Error **errp)
1633e1c57ab8SPaolo Bonzini {
1634e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1635ef701d7bSHu Tao     Error *local_err = NULL;
1636e1c57ab8SPaolo Bonzini 
1637e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
16387f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
1639528f46afSFam Zheng         return NULL;
1640e1c57ab8SPaolo Bonzini     }
1641e1c57ab8SPaolo Bonzini 
1642e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1643e1c57ab8SPaolo Bonzini         /*
1644e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1645e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1646e1c57ab8SPaolo Bonzini          * a hook there.
1647e1c57ab8SPaolo Bonzini          */
16487f56e740SPaolo Bonzini         error_setg(errp,
16497f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
1650528f46afSFam Zheng         return NULL;
1651e1c57ab8SPaolo Bonzini     }
1652e1c57ab8SPaolo Bonzini 
16534ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
1654e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1655e1c57ab8SPaolo Bonzini     new_block->mr = mr;
16569b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16579b8424d5SMichael S. Tsirkin     new_block->max_length = size;
1658dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
16597f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
16607f56e740SPaolo Bonzini                                      mem_path, errp);
16617f56e740SPaolo Bonzini     if (!new_block->host) {
16627f56e740SPaolo Bonzini         g_free(new_block);
1663528f46afSFam Zheng         return NULL;
16647f56e740SPaolo Bonzini     }
16657f56e740SPaolo Bonzini 
1666528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1667ef701d7bSHu Tao     if (local_err) {
1668ef701d7bSHu Tao         g_free(new_block);
1669ef701d7bSHu Tao         error_propagate(errp, local_err);
1670528f46afSFam Zheng         return NULL;
1671ef701d7bSHu Tao     }
1672528f46afSFam Zheng     return new_block;
1673e1c57ab8SPaolo Bonzini }
16740b183fc8SPaolo Bonzini #endif
1675e1c57ab8SPaolo Bonzini 
167662be4e3aSMichael S. Tsirkin static
1677528f46afSFam Zheng RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
167862be4e3aSMichael S. Tsirkin                                   void (*resized)(const char*,
167962be4e3aSMichael S. Tsirkin                                                   uint64_t length,
168062be4e3aSMichael S. Tsirkin                                                   void *host),
168162be4e3aSMichael S. Tsirkin                                   void *host, bool resizeable,
1682ef701d7bSHu Tao                                   MemoryRegion *mr, Error **errp)
1683e1c57ab8SPaolo Bonzini {
1684e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1685ef701d7bSHu Tao     Error *local_err = NULL;
1686e1c57ab8SPaolo Bonzini 
16874ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
16884ed023ceSDr. David Alan Gilbert     max_size = HOST_PAGE_ALIGN(max_size);
1689e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1690e1c57ab8SPaolo Bonzini     new_block->mr = mr;
169162be4e3aSMichael S. Tsirkin     new_block->resized = resized;
16929b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16939b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
169462be4e3aSMichael S. Tsirkin     assert(max_size >= size);
1695e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1696e1c57ab8SPaolo Bonzini     new_block->host = host;
1697e1c57ab8SPaolo Bonzini     if (host) {
16987bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1699e1c57ab8SPaolo Bonzini     }
170062be4e3aSMichael S. Tsirkin     if (resizeable) {
170162be4e3aSMichael S. Tsirkin         new_block->flags |= RAM_RESIZEABLE;
170262be4e3aSMichael S. Tsirkin     }
1703528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1704ef701d7bSHu Tao     if (local_err) {
1705ef701d7bSHu Tao         g_free(new_block);
1706ef701d7bSHu Tao         error_propagate(errp, local_err);
1707528f46afSFam Zheng         return NULL;
1708ef701d7bSHu Tao     }
1709528f46afSFam Zheng     return new_block;
1710e1c57ab8SPaolo Bonzini }
1711e1c57ab8SPaolo Bonzini 
1712528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
171362be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
171462be4e3aSMichael S. Tsirkin {
171562be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
171662be4e3aSMichael S. Tsirkin }
171762be4e3aSMichael S. Tsirkin 
1718528f46afSFam Zheng RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
17196977dfe6SYoshiaki Tamura {
172062be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
172162be4e3aSMichael S. Tsirkin }
172262be4e3aSMichael S. Tsirkin 
1723528f46afSFam Zheng RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
172462be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
172562be4e3aSMichael S. Tsirkin                                                      uint64_t length,
172662be4e3aSMichael S. Tsirkin                                                      void *host),
172762be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
172862be4e3aSMichael S. Tsirkin {
172962be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
17306977dfe6SYoshiaki Tamura }
17316977dfe6SYoshiaki Tamura 
173243771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block)
1733e9a1ab19Sbellard {
17347bd4f430SPaolo Bonzini     if (block->flags & RAM_PREALLOC) {
1735cd19cfa2SHuang Ying         ;
1736dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
1737dfeaf2abSMarkus Armbruster         xen_invalidate_map_cache_entry(block->host);
1738089f3f76SStefan Weil #ifndef _WIN32
17393435f395SMarkus Armbruster     } else if (block->fd >= 0) {
1740794e8f30SMichael S. Tsirkin         qemu_ram_munmap(block->host, block->max_length);
174104b16653SAlex Williamson         close(block->fd);
1742089f3f76SStefan Weil #endif
174304b16653SAlex Williamson     } else {
17449b8424d5SMichael S. Tsirkin         qemu_anon_ram_free(block->host, block->max_length);
174504b16653SAlex Williamson     }
17467267c094SAnthony Liguori     g_free(block);
174743771539SPaolo Bonzini }
174843771539SPaolo Bonzini 
1749f1060c55SFam Zheng void qemu_ram_free(RAMBlock *block)
175043771539SPaolo Bonzini {
175185bc2a15SMarc-André Lureau     if (!block) {
175285bc2a15SMarc-André Lureau         return;
175385bc2a15SMarc-André Lureau     }
175485bc2a15SMarc-André Lureau 
175543771539SPaolo Bonzini     qemu_mutex_lock_ramlist();
17560dc3f44aSMike Day     QLIST_REMOVE_RCU(block, next);
175743771539SPaolo Bonzini     ram_list.mru_block = NULL;
17580dc3f44aSMike Day     /* Write list before version */
17590dc3f44aSMike Day     smp_wmb();
176043771539SPaolo Bonzini     ram_list.version++;
176143771539SPaolo Bonzini     call_rcu(block, reclaim_ramblock, rcu);
1762b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1763e9a1ab19Sbellard }
1764e9a1ab19Sbellard 
1765cd19cfa2SHuang Ying #ifndef _WIN32
1766cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1767cd19cfa2SHuang Ying {
1768cd19cfa2SHuang Ying     RAMBlock *block;
1769cd19cfa2SHuang Ying     ram_addr_t offset;
1770cd19cfa2SHuang Ying     int flags;
1771cd19cfa2SHuang Ying     void *area, *vaddr;
1772cd19cfa2SHuang Ying 
17730dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1774cd19cfa2SHuang Ying         offset = addr - block->offset;
17759b8424d5SMichael S. Tsirkin         if (offset < block->max_length) {
17761240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
17777bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1778cd19cfa2SHuang Ying                 ;
1779dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1780dfeaf2abSMarkus Armbruster                 abort();
1781cd19cfa2SHuang Ying             } else {
1782cd19cfa2SHuang Ying                 flags = MAP_FIXED;
17833435f395SMarkus Armbruster                 if (block->fd >= 0) {
1784dbcb8981SPaolo Bonzini                     flags |= (block->flags & RAM_SHARED ?
1785dbcb8981SPaolo Bonzini                               MAP_SHARED : MAP_PRIVATE);
1786cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1787cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1788cd19cfa2SHuang Ying                 } else {
17892eb9fbaaSMarkus Armbruster                     /*
17902eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
17912eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
17922eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
17932eb9fbaaSMarkus Armbruster                      */
17942eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
17952eb9fbaaSMarkus Armbruster 
1796cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1797cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1798cd19cfa2SHuang Ying                                 flags, -1, 0);
1799cd19cfa2SHuang Ying                 }
1800cd19cfa2SHuang Ying                 if (area != vaddr) {
1801f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1802f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1803cd19cfa2SHuang Ying                             length, addr);
1804cd19cfa2SHuang Ying                     exit(1);
1805cd19cfa2SHuang Ying                 }
18068490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1807ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1808cd19cfa2SHuang Ying             }
1809cd19cfa2SHuang Ying         }
1810cd19cfa2SHuang Ying     }
1811cd19cfa2SHuang Ying }
1812cd19cfa2SHuang Ying #endif /* !_WIN32 */
1813cd19cfa2SHuang Ying 
18141b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
1815ae3a7047SMike Day  * This should not be used for general purpose DMA.  Use address_space_map
1816ae3a7047SMike Day  * or address_space_rw instead. For local memory (e.g. video ram) that the
1817ae3a7047SMike Day  * device owns, use memory_region_get_ram_ptr.
18180dc3f44aSMike Day  *
181949b24afcSPaolo Bonzini  * Called within RCU critical section.
18201b5ec234SPaolo Bonzini  */
18210878d0e1SPaolo Bonzini void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
18221b5ec234SPaolo Bonzini {
18233655cb9cSGonglei     RAMBlock *block = ram_block;
18243655cb9cSGonglei 
18253655cb9cSGonglei     if (block == NULL) {
18263655cb9cSGonglei         block = qemu_get_ram_block(addr);
18270878d0e1SPaolo Bonzini         addr -= block->offset;
18283655cb9cSGonglei     }
1829ae3a7047SMike Day 
1830ae3a7047SMike Day     if (xen_enabled() && block->host == NULL) {
1831432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1832432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1833712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1834432d268cSJun Nakajima          */
1835432d268cSJun Nakajima         if (block->offset == 0) {
183649b24afcSPaolo Bonzini             return xen_map_cache(addr, 0, 0);
1837432d268cSJun Nakajima         }
1838ae3a7047SMike Day 
1839ae3a7047SMike Day         block->host = xen_map_cache(block->offset, block->max_length, 1);
1840432d268cSJun Nakajima     }
18410878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
184294a6b54fSpbrook }
1843f471a17eSAlex Williamson 
18440878d0e1SPaolo Bonzini /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1845ae3a7047SMike Day  * but takes a size argument.
18460dc3f44aSMike Day  *
1847e81bcda5SPaolo Bonzini  * Called within RCU critical section.
1848ae3a7047SMike Day  */
18493655cb9cSGonglei static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
18503655cb9cSGonglei                                  hwaddr *size)
185138bee5dcSStefano Stabellini {
18523655cb9cSGonglei     RAMBlock *block = ram_block;
18538ab934f9SStefano Stabellini     if (*size == 0) {
18548ab934f9SStefano Stabellini         return NULL;
18558ab934f9SStefano Stabellini     }
1856e81bcda5SPaolo Bonzini 
18573655cb9cSGonglei     if (block == NULL) {
1858e81bcda5SPaolo Bonzini         block = qemu_get_ram_block(addr);
18590878d0e1SPaolo Bonzini         addr -= block->offset;
18603655cb9cSGonglei     }
18610878d0e1SPaolo Bonzini     *size = MIN(*size, block->max_length - addr);
1862e81bcda5SPaolo Bonzini 
1863e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
1864e81bcda5SPaolo Bonzini         /* We need to check if the requested address is in the RAM
1865e81bcda5SPaolo Bonzini          * because we don't want to map the entire memory in QEMU.
1866e81bcda5SPaolo Bonzini          * In that case just map the requested area.
1867e81bcda5SPaolo Bonzini          */
1868e81bcda5SPaolo Bonzini         if (block->offset == 0) {
1869e41d7c69SJan Kiszka             return xen_map_cache(addr, *size, 1);
187038bee5dcSStefano Stabellini         }
187138bee5dcSStefano Stabellini 
1872e81bcda5SPaolo Bonzini         block->host = xen_map_cache(block->offset, block->max_length, 1);
187338bee5dcSStefano Stabellini     }
1874e81bcda5SPaolo Bonzini 
18750878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
187638bee5dcSStefano Stabellini }
187738bee5dcSStefano Stabellini 
1878422148d3SDr. David Alan Gilbert /*
1879422148d3SDr. David Alan Gilbert  * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1880422148d3SDr. David Alan Gilbert  * in that RAMBlock.
1881422148d3SDr. David Alan Gilbert  *
1882422148d3SDr. David Alan Gilbert  * ptr: Host pointer to look up
1883422148d3SDr. David Alan Gilbert  * round_offset: If true round the result offset down to a page boundary
1884422148d3SDr. David Alan Gilbert  * *ram_addr: set to result ram_addr
1885422148d3SDr. David Alan Gilbert  * *offset: set to result offset within the RAMBlock
1886422148d3SDr. David Alan Gilbert  *
1887422148d3SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1888ae3a7047SMike Day  *
1889ae3a7047SMike Day  * By the time this function returns, the returned pointer is not protected
1890ae3a7047SMike Day  * by RCU anymore.  If the caller is not within an RCU critical section and
1891ae3a7047SMike Day  * does not hold the iothread lock, it must have other means of protecting the
1892ae3a7047SMike Day  * pointer, such as a reference to the region that includes the incoming
1893ae3a7047SMike Day  * ram_addr_t.
1894ae3a7047SMike Day  */
1895422148d3SDr. David Alan Gilbert RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1896422148d3SDr. David Alan Gilbert                                    ram_addr_t *offset)
18975579c7f3Spbrook {
189894a6b54fSpbrook     RAMBlock *block;
189994a6b54fSpbrook     uint8_t *host = ptr;
190094a6b54fSpbrook 
1901868bb33fSJan Kiszka     if (xen_enabled()) {
1902f615f396SPaolo Bonzini         ram_addr_t ram_addr;
19030dc3f44aSMike Day         rcu_read_lock();
1904f615f396SPaolo Bonzini         ram_addr = xen_ram_addr_from_mapcache(ptr);
1905f615f396SPaolo Bonzini         block = qemu_get_ram_block(ram_addr);
1906422148d3SDr. David Alan Gilbert         if (block) {
1907d6b6aec4SAnthony PERARD             *offset = ram_addr - block->offset;
1908422148d3SDr. David Alan Gilbert         }
19090dc3f44aSMike Day         rcu_read_unlock();
1910422148d3SDr. David Alan Gilbert         return block;
1911712c2b41SStefano Stabellini     }
1912712c2b41SStefano Stabellini 
19130dc3f44aSMike Day     rcu_read_lock();
19140dc3f44aSMike Day     block = atomic_rcu_read(&ram_list.mru_block);
19159b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
191623887b79SPaolo Bonzini         goto found;
191723887b79SPaolo Bonzini     }
191823887b79SPaolo Bonzini 
19190dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1920432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
1921432d268cSJun Nakajima         if (block->host == NULL) {
1922432d268cSJun Nakajima             continue;
1923432d268cSJun Nakajima         }
19249b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
192523887b79SPaolo Bonzini             goto found;
192694a6b54fSpbrook         }
1927f471a17eSAlex Williamson     }
1928432d268cSJun Nakajima 
19290dc3f44aSMike Day     rcu_read_unlock();
19301b5ec234SPaolo Bonzini     return NULL;
193123887b79SPaolo Bonzini 
193223887b79SPaolo Bonzini found:
1933422148d3SDr. David Alan Gilbert     *offset = (host - block->host);
1934422148d3SDr. David Alan Gilbert     if (round_offset) {
1935422148d3SDr. David Alan Gilbert         *offset &= TARGET_PAGE_MASK;
1936422148d3SDr. David Alan Gilbert     }
19370dc3f44aSMike Day     rcu_read_unlock();
1938422148d3SDr. David Alan Gilbert     return block;
1939422148d3SDr. David Alan Gilbert }
1940422148d3SDr. David Alan Gilbert 
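/*
 * Illustrative sketch (compiled out): the lookup strategy of
 * qemu_ram_block_from_host() above.  A most-recently-used pointer makes
 * the common case O(1); only on a miss is the whole list scanned.  The
 * unsigned comparison (host - base) < len exploits wrap-around, so a host
 * pointer below base also fails the test.  Stand-in types; the real code
 * maintains ram_list.mru_block elsewhere (in qemu_get_ram_block()).
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct region {
    uint8_t *base;
    size_t len;
    struct region *next;
};

static struct region *mru;         /* last region that matched */

static struct region *region_from_ptr(struct region *list, void *ptr)
{
    uintptr_t host = (uintptr_t)ptr;
    struct region *r = mru;

    if (r && r->base && host - (uintptr_t)r->base < r->len) {
        return r;                  /* fast path: same region as last time */
    }
    for (r = list; r; r = r->next) {
        if (r->base && host - (uintptr_t)r->base < r->len) {
            mru = r;
            return r;
        }
    }
    return NULL;
}
#endif
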
1941e3dd7493SDr. David Alan Gilbert /*
1942e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
1943e3dd7493SDr. David Alan Gilbert  *
1944e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
1945e3dd7493SDr. David Alan Gilbert  *
1946e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1947e3dd7493SDr. David Alan Gilbert  */
1948e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
1949e3dd7493SDr. David Alan Gilbert {
1950e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
1951e3dd7493SDr. David Alan Gilbert 
1952e3dd7493SDr. David Alan Gilbert     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1953e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
1954e3dd7493SDr. David Alan Gilbert             return block;
1955e3dd7493SDr. David Alan Gilbert         }
1956e3dd7493SDr. David Alan Gilbert     }
1957e3dd7493SDr. David Alan Gilbert 
1958e3dd7493SDr. David Alan Gilbert     return NULL;
1959e3dd7493SDr. David Alan Gilbert }
1960e3dd7493SDr. David Alan Gilbert 
1961422148d3SDr. David Alan Gilbert /* Some of the softmmu routines need to translate from a host pointer
1962422148d3SDr. David Alan Gilbert    (typically a TLB entry) back to a ram offset.  */
196307bdaa41SPaolo Bonzini ram_addr_t qemu_ram_addr_from_host(void *ptr)
1964422148d3SDr. David Alan Gilbert {
1965422148d3SDr. David Alan Gilbert     RAMBlock *block;
1966f615f396SPaolo Bonzini     ram_addr_t offset;
1967422148d3SDr. David Alan Gilbert 
1968f615f396SPaolo Bonzini     block = qemu_ram_block_from_host(ptr, false, &offset);
1969422148d3SDr. David Alan Gilbert     if (!block) {
197007bdaa41SPaolo Bonzini         return RAM_ADDR_INVALID;
1971422148d3SDr. David Alan Gilbert     }
1972422148d3SDr. David Alan Gilbert 
197307bdaa41SPaolo Bonzini     return block->offset + offset;
1974e890261fSMarcelo Tosatti }
1975f471a17eSAlex Williamson 
197649b24afcSPaolo Bonzini /* Called within RCU critical section.  */
1977a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
19780e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
19791ccde1cbSbellard {
198052159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
19810e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
19823a7d929eSbellard     }
19830e0df1e2SAvi Kivity     switch (size) {
19840e0df1e2SAvi Kivity     case 1:
19850878d0e1SPaolo Bonzini         stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19860e0df1e2SAvi Kivity         break;
19870e0df1e2SAvi Kivity     case 2:
19880878d0e1SPaolo Bonzini         stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19890e0df1e2SAvi Kivity         break;
19900e0df1e2SAvi Kivity     case 4:
19910878d0e1SPaolo Bonzini         stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
19920e0df1e2SAvi Kivity         break;
19930e0df1e2SAvi Kivity     default:
19940e0df1e2SAvi Kivity         abort();
19950e0df1e2SAvi Kivity     }
199658d2707eSPaolo Bonzini     /* Set both VGA and migration bits for simplicity and to remove
199758d2707eSPaolo Bonzini      * the notdirty callback faster.
199858d2707eSPaolo Bonzini      */
199958d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(ram_addr, size,
200058d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_NOCODE);
2001f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2002f23db169Sbellard        flushed */
2003a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
2004bcae01e4SPeter Crosthwaite         tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
20054917cf44SAndreas Färber     }
20061ccde1cbSbellard }
20071ccde1cbSbellard 
2008b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2009b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
2010b018ddf6SPaolo Bonzini {
2011b018ddf6SPaolo Bonzini     return is_write;
2012b018ddf6SPaolo Bonzini }
2013b018ddf6SPaolo Bonzini 
20140e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
20150e0df1e2SAvi Kivity     .write = notdirty_mem_write,
2016b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
20170e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
20181ccde1cbSbellard };
20191ccde1cbSbellard 
20200f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
202166b9b43cSPeter Maydell static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
20220f459d16Spbrook {
202393afeadeSAndreas Färber     CPUState *cpu = current_cpu;
2024568496c0SSergey Fedorov     CPUClass *cc = CPU_GET_CLASS(cpu);
202593afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
202606d55cc1Saliguori     target_ulong pc, cs_base;
20270f459d16Spbrook     target_ulong vaddr;
2028a1d1bb31Saliguori     CPUWatchpoint *wp;
202989fee74aSEmilio G. Cota     uint32_t cpu_flags;
20300f459d16Spbrook 
2031ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
203206d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
203306d55cc1Saliguori          * the debug interrupt so that it will trigger after the
203406d55cc1Saliguori          * current instruction. */
203593afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
203606d55cc1Saliguori         return;
203706d55cc1Saliguori     }
203893afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2039ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
204005068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
204105068c0dSPeter Maydell             && (wp->flags & flags)) {
204208225676SPeter Maydell             if (flags == BP_MEM_READ) {
204308225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
204408225676SPeter Maydell             } else {
204508225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
204608225676SPeter Maydell             }
204708225676SPeter Maydell             wp->hitaddr = vaddr;
204866b9b43cSPeter Maydell             wp->hitattrs = attrs;
2049ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
2050568496c0SSergey Fedorov                 if (wp->flags & BP_CPU &&
2051568496c0SSergey Fedorov                     !cc->debug_check_watchpoint(cpu, wp)) {
2052568496c0SSergey Fedorov                     wp->flags &= ~BP_WATCHPOINT_HIT;
2053568496c0SSergey Fedorov                     continue;
2054568496c0SSergey Fedorov                 }
2055ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
2056239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
205706d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
205827103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
20595638d180SAndreas Färber                     cpu_loop_exit(cpu);
206006d55cc1Saliguori                 } else {
206106d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2062648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
20636886b980SPeter Maydell                     cpu_loop_exit_noexc(cpu);
20640f459d16Spbrook                 }
2065488d6577SMax Filippov             }
20666e140f28Saliguori         } else {
20676e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
20686e140f28Saliguori         }
20690f459d16Spbrook     }
20700f459d16Spbrook }
20710f459d16Spbrook 
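/*
 * Illustrative sketch (not part of the original file): check_watchpoint()
 * above only fires for watchpoints whose flags overlap the access type.
 * Assuming the cpu_watchpoint_insert() API from the CPU breakpoint code,
 * a debugger front end would arm a 4-byte read watchpoint like so:
 */
static int arm_read_watchpoint_example(CPUState *cpu, vaddr addr)
{
    CPUWatchpoint *wp;

    /* BP_MEM_READ makes the BP_MEM_READ branch above tag the hit;
     * adding BP_STOP_BEFORE_ACCESS would raise EXCP_DEBUG before the
     * access completes instead of after retranslation. */
    return cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_READ | BP_GDB, &wp);
}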
20726658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
20736658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
20746658ffb8Spbrook    phys routines.  */
207566b9b43cSPeter Maydell static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
207666b9b43cSPeter Maydell                                   unsigned size, MemTxAttrs attrs)
20776658ffb8Spbrook {
207866b9b43cSPeter Maydell     MemTxResult res;
207966b9b43cSPeter Maydell     uint64_t data;
208079ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
208179ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
20826658ffb8Spbrook 
208366b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
20841ec9b909SAvi Kivity     switch (size) {
208567364150SMax Filippov     case 1:
208679ed0416SPeter Maydell         data = address_space_ldub(as, addr, attrs, &res);
208767364150SMax Filippov         break;
208867364150SMax Filippov     case 2:
208979ed0416SPeter Maydell         data = address_space_lduw(as, addr, attrs, &res);
209067364150SMax Filippov         break;
209167364150SMax Filippov     case 4:
209279ed0416SPeter Maydell         data = address_space_ldl(as, addr, attrs, &res);
209367364150SMax Filippov         break;
20941ec9b909SAvi Kivity     default: abort();
20951ec9b909SAvi Kivity     }
209666b9b43cSPeter Maydell     *pdata = data;
209766b9b43cSPeter Maydell     return res;
209866b9b43cSPeter Maydell }
209966b9b43cSPeter Maydell 
210066b9b43cSPeter Maydell static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
210166b9b43cSPeter Maydell                                    uint64_t val, unsigned size,
210266b9b43cSPeter Maydell                                    MemTxAttrs attrs)
210366b9b43cSPeter Maydell {
210466b9b43cSPeter Maydell     MemTxResult res;
210579ed0416SPeter Maydell     int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
210679ed0416SPeter Maydell     AddressSpace *as = current_cpu->cpu_ases[asidx].as;
210766b9b43cSPeter Maydell 
210866b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
210966b9b43cSPeter Maydell     switch (size) {
211066b9b43cSPeter Maydell     case 1:
211179ed0416SPeter Maydell         address_space_stb(as, addr, val, attrs, &res);
211266b9b43cSPeter Maydell         break;
211366b9b43cSPeter Maydell     case 2:
211479ed0416SPeter Maydell         address_space_stw(as, addr, val, attrs, &res);
211566b9b43cSPeter Maydell         break;
211666b9b43cSPeter Maydell     case 4:
211779ed0416SPeter Maydell         address_space_stl(as, addr, val, attrs, &res);
211866b9b43cSPeter Maydell         break;
211966b9b43cSPeter Maydell     default: abort();
212066b9b43cSPeter Maydell     }
212166b9b43cSPeter Maydell     return res;
21226658ffb8Spbrook }
21236658ffb8Spbrook 
21241ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
212566b9b43cSPeter Maydell     .read_with_attrs = watch_mem_read,
212666b9b43cSPeter Maydell     .write_with_attrs = watch_mem_write,
21271ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
21286658ffb8Spbrook };
21296658ffb8Spbrook 
2130f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2131f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2132db7b5426Sblueswir1 {
2133acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2134ff6cff75SPaolo Bonzini     uint8_t buf[8];
21355c9eb028SPeter Maydell     MemTxResult res;
2136791af8c8SPaolo Bonzini 
2137db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2138016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2139acc9d80bSJan Kiszka            subpage, len, addr);
2140db7b5426Sblueswir1 #endif
21415c9eb028SPeter Maydell     res = address_space_read(subpage->as, addr + subpage->base,
21425c9eb028SPeter Maydell                              attrs, buf, len);
21435c9eb028SPeter Maydell     if (res) {
21445c9eb028SPeter Maydell         return res;
2145f25a49e0SPeter Maydell     }
2146acc9d80bSJan Kiszka     switch (len) {
2147acc9d80bSJan Kiszka     case 1:
2148f25a49e0SPeter Maydell         *data = ldub_p(buf);
2149f25a49e0SPeter Maydell         return MEMTX_OK;
2150acc9d80bSJan Kiszka     case 2:
2151f25a49e0SPeter Maydell         *data = lduw_p(buf);
2152f25a49e0SPeter Maydell         return MEMTX_OK;
2153acc9d80bSJan Kiszka     case 4:
2154f25a49e0SPeter Maydell         *data = ldl_p(buf);
2155f25a49e0SPeter Maydell         return MEMTX_OK;
2156ff6cff75SPaolo Bonzini     case 8:
2157f25a49e0SPeter Maydell         *data = ldq_p(buf);
2158f25a49e0SPeter Maydell         return MEMTX_OK;
2159acc9d80bSJan Kiszka     default:
2160acc9d80bSJan Kiszka         abort();
2161acc9d80bSJan Kiszka     }
2162db7b5426Sblueswir1 }
2163db7b5426Sblueswir1 
2164f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2165f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2166db7b5426Sblueswir1 {
2167acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2168ff6cff75SPaolo Bonzini     uint8_t buf[8];
2169acc9d80bSJan Kiszka 
2170db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2171016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2172acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2173acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2174db7b5426Sblueswir1 #endif
2175acc9d80bSJan Kiszka     switch (len) {
2176acc9d80bSJan Kiszka     case 1:
2177acc9d80bSJan Kiszka         stb_p(buf, value);
2178acc9d80bSJan Kiszka         break;
2179acc9d80bSJan Kiszka     case 2:
2180acc9d80bSJan Kiszka         stw_p(buf, value);
2181acc9d80bSJan Kiszka         break;
2182acc9d80bSJan Kiszka     case 4:
2183acc9d80bSJan Kiszka         stl_p(buf, value);
2184acc9d80bSJan Kiszka         break;
2185ff6cff75SPaolo Bonzini     case 8:
2186ff6cff75SPaolo Bonzini         stq_p(buf, value);
2187ff6cff75SPaolo Bonzini         break;
2188acc9d80bSJan Kiszka     default:
2189acc9d80bSJan Kiszka         abort();
2190acc9d80bSJan Kiszka     }
21915c9eb028SPeter Maydell     return address_space_write(subpage->as, addr + subpage->base,
21925c9eb028SPeter Maydell                                attrs, buf, len);
2193db7b5426Sblueswir1 }
2194db7b5426Sblueswir1 
2195c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
2196016e9d62SAmos Kong                             unsigned len, bool is_write)
2197c353e4ccSPaolo Bonzini {
2198acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2199c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2200016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2201acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2202c353e4ccSPaolo Bonzini #endif
2203c353e4ccSPaolo Bonzini 
2204acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
2205016e9d62SAmos Kong                                       len, is_write);
2206c353e4ccSPaolo Bonzini }
2207c353e4ccSPaolo Bonzini 
220870c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2209f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2210f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2211ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2212ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2213ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2214ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2215c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
221670c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2217db7b5426Sblueswir1 };
2218db7b5426Sblueswir1 
2219c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
22205312bd8bSAvi Kivity                              uint16_t section)
2221db7b5426Sblueswir1 {
2222db7b5426Sblueswir1     int idx, eidx;
2223db7b5426Sblueswir1 
2224db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2225db7b5426Sblueswir1         return -1;
2226db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2227db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2228db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2229016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2230016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2231db7b5426Sblueswir1 #endif
2232db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
22335312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2234db7b5426Sblueswir1     }
2235db7b5426Sblueswir1 
2236db7b5426Sblueswir1     return 0;
2237db7b5426Sblueswir1 }
2238db7b5426Sblueswir1 
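/*
 * Illustrative sketch (not part of the original file), assuming
 * SUBPAGE_IDX() indexes sub_section[] at byte granularity within the
 * page: registering a 16-byte MMIO window at page offset 0x100 fills
 * sixteen slots and leaves the rest of the page untouched:
 */
static void subpage_register_example(subpage_t *mmio, uint16_t section)
{
    /* covers offsets 0x100..0x10f inclusive; all other offsets keep
     * whatever was registered before (PHYS_SECTION_UNASSIGNED right
     * after subpage_init()) */
    subpage_register(mmio, 0x100, 0x10f, section);
}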
2239acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2240db7b5426Sblueswir1 {
2241c227f099SAnthony Liguori     subpage_t *mmio;
2242db7b5426Sblueswir1 
22437267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
22441eec614bSaliguori 
2245acc9d80bSJan Kiszka     mmio->as = as;
2246db7b5426Sblueswir1     mmio->base = base;
22472c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2248b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2249b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2250db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2251016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2252016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2253db7b5426Sblueswir1 #endif
2254b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2255db7b5426Sblueswir1 
2256db7b5426Sblueswir1     return mmio;
2257db7b5426Sblueswir1 }
2258db7b5426Sblueswir1 
2259a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2260a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
22615312bd8bSAvi Kivity {
2262a656e22fSPeter Crosthwaite     assert(as);
22635312bd8bSAvi Kivity     MemoryRegionSection section = {
2264a656e22fSPeter Crosthwaite         .address_space = as,
22655312bd8bSAvi Kivity         .mr = mr,
22665312bd8bSAvi Kivity         .offset_within_address_space = 0,
22675312bd8bSAvi Kivity         .offset_within_region = 0,
2268052e87b0SPaolo Bonzini         .size = int128_2_64(),
22695312bd8bSAvi Kivity     };
22705312bd8bSAvi Kivity 
227153cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
22725312bd8bSAvi Kivity }
22735312bd8bSAvi Kivity 
2274a54c87b6SPeter Maydell MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2275aa102231SAvi Kivity {
2276a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2277a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
227832857f4dSPeter Maydell     AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
227979e2b9aeSPaolo Bonzini     MemoryRegionSection *sections = d->map.sections;
22809d82b5a7SPaolo Bonzini 
22819d82b5a7SPaolo Bonzini     return sections[index & ~TARGET_PAGE_MASK].mr;
2282aa102231SAvi Kivity }
2283aa102231SAvi Kivity 
2284e9179ce1SAvi Kivity static void io_mem_init(void)
2285e9179ce1SAvi Kivity {
22861f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
22872c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
22881f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22892c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
22901f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
22912c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
22921f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2293e9179ce1SAvi Kivity }
2294e9179ce1SAvi Kivity 
2295ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
2296ac1970fbSAvi Kivity {
229789ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
229853cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
229953cb28cbSMarcel Apfelbaum     uint16_t n;
230053cb28cbSMarcel Apfelbaum 
2301a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
230253cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
2303a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
230453cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
2305a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
230653cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
2307a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
230853cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
230900752703SPaolo Bonzini 
23109736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
231100752703SPaolo Bonzini     d->as = as;
231200752703SPaolo Bonzini     as->next_dispatch = d;
231300752703SPaolo Bonzini }
231400752703SPaolo Bonzini 
231579e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d)
231679e2b9aeSPaolo Bonzini {
231779e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
231879e2b9aeSPaolo Bonzini     g_free(d);
231979e2b9aeSPaolo Bonzini }
232079e2b9aeSPaolo Bonzini 
232100752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
232200752703SPaolo Bonzini {
232300752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
23240475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
23250475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
2326ac1970fbSAvi Kivity 
232753cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
2328b35ba30fSMichael S. Tsirkin 
232979e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, next);
233053cb28cbSMarcel Apfelbaum     if (cur) {
233179e2b9aeSPaolo Bonzini         call_rcu(cur, address_space_dispatch_free, rcu);
2332ac1970fbSAvi Kivity     }
23339affd6fcSPaolo Bonzini }
23349affd6fcSPaolo Bonzini 
23351d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
233650c1e149SAvi Kivity {
233732857f4dSPeter Maydell     CPUAddressSpace *cpuas;
233832857f4dSPeter Maydell     AddressSpaceDispatch *d;
2339117712c3SAvi Kivity 
2340117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
2341117712c3SAvi Kivity        reset the modified entries */
234232857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
234332857f4dSPeter Maydell     cpu_reloading_memory_map();
234432857f4dSPeter Maydell     /* The CPU and TLB are protected by the iothread lock.
234532857f4dSPeter Maydell      * We reload the dispatch pointer now because cpu_reloading_memory_map()
234632857f4dSPeter Maydell      * may have split the RCU critical section.
234732857f4dSPeter Maydell      */
234832857f4dSPeter Maydell     d = atomic_rcu_read(&cpuas->as->dispatch);
234932857f4dSPeter Maydell     cpuas->memory_dispatch = d;
235032857f4dSPeter Maydell     tlb_flush(cpuas->cpu, 1);
235150c1e149SAvi Kivity }
235250c1e149SAvi Kivity 
2353ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
2354ac1970fbSAvi Kivity {
235500752703SPaolo Bonzini     as->dispatch = NULL;
235689ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
2357ac1970fbSAvi Kivity         .begin = mem_begin,
235800752703SPaolo Bonzini         .commit = mem_commit,
2359ac1970fbSAvi Kivity         .region_add = mem_add,
2360ac1970fbSAvi Kivity         .region_nop = mem_add,
2361ac1970fbSAvi Kivity         .priority = 0,
2362ac1970fbSAvi Kivity     };
236389ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
2364ac1970fbSAvi Kivity }
2365ac1970fbSAvi Kivity 
23666e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
23676e48e8f9SPaolo Bonzini {
23686e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
23696e48e8f9SPaolo Bonzini }
23706e48e8f9SPaolo Bonzini 
237183f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
237283f3c251SAvi Kivity {
237383f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
237483f3c251SAvi Kivity 
237579e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
237679e2b9aeSPaolo Bonzini     if (d) {
237779e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
237879e2b9aeSPaolo Bonzini     }
237983f3c251SAvi Kivity }
238083f3c251SAvi Kivity 
238162152b8aSAvi Kivity static void memory_map_init(void)
238262152b8aSAvi Kivity {
23837267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
238403f49957SPaolo Bonzini 
238557271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
23867dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2387309cb471SAvi Kivity 
23887267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
23893bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
23903bb28b72SJan Kiszka                           65536);
23917dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
23922641689aSliguang }
239362152b8aSAvi Kivity 
239462152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
239562152b8aSAvi Kivity {
239662152b8aSAvi Kivity     return system_memory;
239762152b8aSAvi Kivity }
239862152b8aSAvi Kivity 
2399309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2400309cb471SAvi Kivity {
2401309cb471SAvi Kivity     return system_io;
2402309cb471SAvi Kivity }
2403309cb471SAvi Kivity 
2404e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2405e2eef170Spbrook 
240613eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
240713eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2408f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2409a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
241013eb76e0Sbellard {
241113eb76e0Sbellard     int l, flags;
241213eb76e0Sbellard     target_ulong page;
241353a5960aSpbrook     void * p;
241413eb76e0Sbellard 
241513eb76e0Sbellard     while (len > 0) {
241613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
241713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
241813eb76e0Sbellard         if (l > len)
241913eb76e0Sbellard             l = len;
242013eb76e0Sbellard         flags = page_get_flags(page);
242113eb76e0Sbellard         if (!(flags & PAGE_VALID))
2422a68fe89cSPaul Brook             return -1;
242313eb76e0Sbellard         if (is_write) {
242413eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2425a68fe89cSPaul Brook                 return -1;
2426579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
242772fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2428a68fe89cSPaul Brook                 return -1;
242972fb7daaSaurel32             memcpy(p, buf, l);
243072fb7daaSaurel32             unlock_user(p, addr, l);
243113eb76e0Sbellard         } else {
243213eb76e0Sbellard             if (!(flags & PAGE_READ))
2433a68fe89cSPaul Brook                 return -1;
2434579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
243572fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2436a68fe89cSPaul Brook                 return -1;
243772fb7daaSaurel32             memcpy(buf, p, l);
24385b257578Saurel32             unlock_user(p, addr, 0);
243913eb76e0Sbellard         }
244013eb76e0Sbellard         len -= l;
244113eb76e0Sbellard         buf += l;
244213eb76e0Sbellard         addr += l;
244313eb76e0Sbellard     }
2444a68fe89cSPaul Brook     return 0;
244513eb76e0Sbellard }
24468df1cd07Sbellard 
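/*
 * Illustrative sketch (not part of the original file): a gdbstub-style
 * caller peeking at guest memory.  is_write == 0 selects the read path
 * in both the user-only variant above and the softmmu variant defined
 * elsewhere in this file:
 */
static bool peek_guest_u32_example(CPUState *cpu, target_ulong addr,
                                   uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, addr, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}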
244713eb76e0Sbellard #else
244851d7a9ebSAnthony PERARD 
2449845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2450a8170e5eSAvi Kivity                                      hwaddr length)
245151d7a9ebSAnthony PERARD {
2452845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
24530878d0e1SPaolo Bonzini     addr += memory_region_get_ram_addr(mr);
24540878d0e1SPaolo Bonzini 
2455e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2456e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2457e87f7778SPaolo Bonzini      * xen_modified_memory.
2458e87f7778SPaolo Bonzini      */
2459e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2460e87f7778SPaolo Bonzini         dirty_log_mask =
2461e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2462e87f7778SPaolo Bonzini     }
2463845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
246435865339SPaolo Bonzini         tb_invalidate_phys_range(addr, addr + length);
2465845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2466845b6214SPaolo Bonzini     }
246758d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
246849dfcec4SPaolo Bonzini }
246951d7a9ebSAnthony PERARD 
247023326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
247182f2563fSPaolo Bonzini {
2472e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
247323326164SRichard Henderson 
247423326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
247523326164SRichard Henderson        otherwise specified.  */
247623326164SRichard Henderson     if (access_size_max == 0) {
247723326164SRichard Henderson         access_size_max = 4;
247882f2563fSPaolo Bonzini     }
247923326164SRichard Henderson 
248023326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
248123326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
248223326164SRichard Henderson         unsigned align_size_max = addr & -addr;
248323326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
248423326164SRichard Henderson             access_size_max = align_size_max;
248523326164SRichard Henderson         }
248623326164SRichard Henderson     }
248723326164SRichard Henderson 
248823326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
248923326164SRichard Henderson     if (l > access_size_max) {
249023326164SRichard Henderson         l = access_size_max;
249123326164SRichard Henderson     }
24926554f5c0SPeter Maydell     l = pow2floor(l);
249323326164SRichard Henderson 
249423326164SRichard Henderson     return l;
249582f2563fSPaolo Bonzini }
249682f2563fSPaolo Bonzini 
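/*
 * Illustrative sketch (not part of the original file): worked example of
 * the clamping above for a hypothetical region with
 * valid.max_access_size == 4 and no unaligned access support:
 */
static void memory_access_size_example(MemoryRegion *mr)
{
    assert(memory_access_size(mr, 8, 0x1006) == 2); /* 0x1006 & -0x1006 == 2 */
    assert(memory_access_size(mr, 8, 0x1000) == 4); /* bound by region max */
    assert(memory_access_size(mr, 3, 0x1000) == 2); /* pow2floor(3) */
}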
24974840f10eSJan Kiszka static bool prepare_mmio_access(MemoryRegion *mr)
2498125b3806SPaolo Bonzini {
24994840f10eSJan Kiszka     bool unlocked = !qemu_mutex_iothread_locked();
25004840f10eSJan Kiszka     bool release_lock = false;
25014840f10eSJan Kiszka 
25024840f10eSJan Kiszka     if (unlocked && mr->global_locking) {
25034840f10eSJan Kiszka         qemu_mutex_lock_iothread();
25044840f10eSJan Kiszka         unlocked = false;
25054840f10eSJan Kiszka         release_lock = true;
2506125b3806SPaolo Bonzini     }
25074840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
25084840f10eSJan Kiszka         if (unlocked) {
25094840f10eSJan Kiszka             qemu_mutex_lock_iothread();
25104840f10eSJan Kiszka         }
25114840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
25124840f10eSJan Kiszka         if (unlocked) {
25134840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
25144840f10eSJan Kiszka         }
25154840f10eSJan Kiszka     }
25164840f10eSJan Kiszka 
25174840f10eSJan Kiszka     return release_lock;
2518125b3806SPaolo Bonzini }
2519125b3806SPaolo Bonzini 
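/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern prepare_mmio_access() expects, as used by the dispatch loops
 * below -- take the iothread lock only when the region needs it, and
 * drop it once the MMIO access is done:
 */
static void mmio_locking_pattern_example(MemoryRegion *mr)
{
    bool release_lock = prepare_mmio_access(mr);

    /* ... memory_region_dispatch_read()/_write() against mr ... */

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
}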
2520a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2521a203ac70SPaolo Bonzini static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2522a203ac70SPaolo Bonzini                                                 MemTxAttrs attrs,
2523a203ac70SPaolo Bonzini                                                 const uint8_t *buf,
2524a203ac70SPaolo Bonzini                                                 int len, hwaddr addr1,
2525a203ac70SPaolo Bonzini                                                 hwaddr l, MemoryRegion *mr)
252613eb76e0Sbellard {
252713eb76e0Sbellard     uint8_t *ptr;
2528791af8c8SPaolo Bonzini     uint64_t val;
25293b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
25304840f10eSJan Kiszka     bool release_lock = false;
253113eb76e0Sbellard 
2532a203ac70SPaolo Bonzini     for (;;) {
2533eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, true)) {
25344840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
25355c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
25364917cf44SAndreas Färber             /* XXX: could force current_cpu to NULL to avoid
25376a00d601Sbellard                potential bugs */
253823326164SRichard Henderson             switch (l) {
253923326164SRichard Henderson             case 8:
254023326164SRichard Henderson                 /* 64 bit write access */
254123326164SRichard Henderson                 val = ldq_p(buf);
25423b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 8,
25433b643495SPeter Maydell                                                        attrs);
254423326164SRichard Henderson                 break;
254523326164SRichard Henderson             case 4:
25461c213d19Sbellard                 /* 32 bit write access */
2547c27004ecSbellard                 val = ldl_p(buf);
25483b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 4,
25493b643495SPeter Maydell                                                        attrs);
255023326164SRichard Henderson                 break;
255123326164SRichard Henderson             case 2:
25521c213d19Sbellard                 /* 16 bit write access */
2553c27004ecSbellard                 val = lduw_p(buf);
25543b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 2,
25553b643495SPeter Maydell                                                        attrs);
255623326164SRichard Henderson                 break;
255723326164SRichard Henderson             case 1:
25581c213d19Sbellard                 /* 8 bit write access */
2559c27004ecSbellard                 val = ldub_p(buf);
25603b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 1,
25613b643495SPeter Maydell                                                        attrs);
256223326164SRichard Henderson                 break;
256323326164SRichard Henderson             default:
256423326164SRichard Henderson                 abort();
256513eb76e0Sbellard             }
25662bbfa05dSPaolo Bonzini         } else {
256713eb76e0Sbellard             /* RAM case */
25680878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
256913eb76e0Sbellard             memcpy(ptr, buf, l);
2570845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, l);
25713a7d929eSbellard         }
2572eb7eeb88SPaolo Bonzini 
2573eb7eeb88SPaolo Bonzini         if (release_lock) {
2574eb7eeb88SPaolo Bonzini             qemu_mutex_unlock_iothread();
2575eb7eeb88SPaolo Bonzini             release_lock = false;
2576eb7eeb88SPaolo Bonzini         }
2577eb7eeb88SPaolo Bonzini 
2578eb7eeb88SPaolo Bonzini         len -= l;
2579eb7eeb88SPaolo Bonzini         buf += l;
2580eb7eeb88SPaolo Bonzini         addr += l;
2581a203ac70SPaolo Bonzini 
2582a203ac70SPaolo Bonzini         if (!len) {
2583a203ac70SPaolo Bonzini             break;
2584eb7eeb88SPaolo Bonzini         }
2585a203ac70SPaolo Bonzini 
2586a203ac70SPaolo Bonzini         l = len;
2587a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2588a203ac70SPaolo Bonzini     }
2589eb7eeb88SPaolo Bonzini 
2590eb7eeb88SPaolo Bonzini     return result;
2591eb7eeb88SPaolo Bonzini }
2592eb7eeb88SPaolo Bonzini 
2593a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2594a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2595eb7eeb88SPaolo Bonzini {
2596eb7eeb88SPaolo Bonzini     hwaddr l;
2597eb7eeb88SPaolo Bonzini     hwaddr addr1;
2598eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2599eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2600a203ac70SPaolo Bonzini 
2601a203ac70SPaolo Bonzini     if (len > 0) {
2602a203ac70SPaolo Bonzini         rcu_read_lock();
2603a203ac70SPaolo Bonzini         l = len;
2604a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2605a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2606a203ac70SPaolo Bonzini                                               addr1, l, mr);
2607a203ac70SPaolo Bonzini         rcu_read_unlock();
2608a203ac70SPaolo Bonzini     }
2609a203ac70SPaolo Bonzini 
2610a203ac70SPaolo Bonzini     return result;
2611a203ac70SPaolo Bonzini }
2612a203ac70SPaolo Bonzini 
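/*
 * Illustrative sketch (not part of the original file): a typical
 * device-model caller of address_space_write(), checking the
 * transaction result instead of assuming success:
 */
static bool write_u32_example(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t buf[4];

    stl_p(buf, val);    /* target-endian store, as in the loop above */
    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                               buf, sizeof(buf)) == MEMTX_OK;
}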
2613a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2614a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2615a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
2616a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
2617a203ac70SPaolo Bonzini                                         MemoryRegion *mr)
2618a203ac70SPaolo Bonzini {
2619a203ac70SPaolo Bonzini     uint8_t *ptr;
2620a203ac70SPaolo Bonzini     uint64_t val;
2621a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2622eb7eeb88SPaolo Bonzini     bool release_lock = false;
2623eb7eeb88SPaolo Bonzini 
2624a203ac70SPaolo Bonzini     for (;;) {
2625eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, false)) {
262613eb76e0Sbellard             /* I/O case */
26274840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
26285c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
262923326164SRichard Henderson             switch (l) {
263023326164SRichard Henderson             case 8:
263123326164SRichard Henderson                 /* 64 bit read access */
26323b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
26333b643495SPeter Maydell                                                       attrs);
263423326164SRichard Henderson                 stq_p(buf, val);
263523326164SRichard Henderson                 break;
263623326164SRichard Henderson             case 4:
263713eb76e0Sbellard                 /* 32 bit read access */
26383b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
26393b643495SPeter Maydell                                                       attrs);
2640c27004ecSbellard                 stl_p(buf, val);
264123326164SRichard Henderson                 break;
264223326164SRichard Henderson             case 2:
264313eb76e0Sbellard                 /* 16 bit read access */
26443b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
26453b643495SPeter Maydell                                                       attrs);
2646c27004ecSbellard                 stw_p(buf, val);
264723326164SRichard Henderson                 break;
264823326164SRichard Henderson             case 1:
26491c213d19Sbellard                 /* 8 bit read access */
26503b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
26513b643495SPeter Maydell                                                       attrs);
2652c27004ecSbellard                 stb_p(buf, val);
265323326164SRichard Henderson                 break;
265423326164SRichard Henderson             default:
265523326164SRichard Henderson                 abort();
265613eb76e0Sbellard             }
265713eb76e0Sbellard         } else {
265813eb76e0Sbellard             /* RAM case */
26590878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2660f3705d53SAvi Kivity             memcpy(buf, ptr, l);
266113eb76e0Sbellard         }
26624840f10eSJan Kiszka 
26634840f10eSJan Kiszka         if (release_lock) {
26644840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
26654840f10eSJan Kiszka             release_lock = false;
26664840f10eSJan Kiszka         }
26674840f10eSJan Kiszka 
266813eb76e0Sbellard         len -= l;
266913eb76e0Sbellard         buf += l;
267013eb76e0Sbellard         addr += l;
2671a203ac70SPaolo Bonzini 
2672a203ac70SPaolo Bonzini         if (!len) {
2673a203ac70SPaolo Bonzini             break;
267413eb76e0Sbellard         }
2675a203ac70SPaolo Bonzini 
2676a203ac70SPaolo Bonzini         l = len;
2677a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2678a203ac70SPaolo Bonzini     }
2679a203ac70SPaolo Bonzini 
2680a203ac70SPaolo Bonzini     return result;
2681a203ac70SPaolo Bonzini }
2682a203ac70SPaolo Bonzini 
26833cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
26843cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2685a203ac70SPaolo Bonzini {
2686a203ac70SPaolo Bonzini     hwaddr l;
2687a203ac70SPaolo Bonzini     hwaddr addr1;
2688a203ac70SPaolo Bonzini     MemoryRegion *mr;
2689a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2690a203ac70SPaolo Bonzini 
2691a203ac70SPaolo Bonzini     if (len > 0) {
2692a203ac70SPaolo Bonzini         rcu_read_lock();
2693a203ac70SPaolo Bonzini         l = len;
2694a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2695a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2696a203ac70SPaolo Bonzini                                              addr1, l, mr);
269741063e1eSPaolo Bonzini         rcu_read_unlock();
2698a203ac70SPaolo Bonzini     }
2699fd8aaa76SPaolo Bonzini 
27003b643495SPeter Maydell     return result;
270113eb76e0Sbellard }
27028df1cd07Sbellard 
2703eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2704eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2705ac1970fbSAvi Kivity {
2706eb7eeb88SPaolo Bonzini     if (is_write) {
2707eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2708eb7eeb88SPaolo Bonzini     } else {
2709eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2710ac1970fbSAvi Kivity     }
2711ac1970fbSAvi Kivity }
2712ac1970fbSAvi Kivity 
2713a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2714ac1970fbSAvi Kivity                             int len, int is_write)
2715ac1970fbSAvi Kivity {
27165c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
27175c9eb028SPeter Maydell                      buf, len, is_write);
2718ac1970fbSAvi Kivity }
2719ac1970fbSAvi Kivity 
2720582b55a9SAlexander Graf enum write_rom_type {
2721582b55a9SAlexander Graf     WRITE_DATA,
2722582b55a9SAlexander Graf     FLUSH_CACHE,
2723582b55a9SAlexander Graf };
2724582b55a9SAlexander Graf 
27252a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2726582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2727d0ecd2aaSbellard {
2728149f54b5SPaolo Bonzini     hwaddr l;
2729d0ecd2aaSbellard     uint8_t *ptr;
2730149f54b5SPaolo Bonzini     hwaddr addr1;
27315c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2732d0ecd2aaSbellard 
273341063e1eSPaolo Bonzini     rcu_read_lock();
2734d0ecd2aaSbellard     while (len > 0) {
2735d0ecd2aaSbellard         l = len;
27362a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2737d0ecd2aaSbellard 
27385c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
27395c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2740b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
2741d0ecd2aaSbellard         } else {
2742d0ecd2aaSbellard             /* ROM/RAM case */
27430878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2744582b55a9SAlexander Graf             switch (type) {
2745582b55a9SAlexander Graf             case WRITE_DATA:
2746d0ecd2aaSbellard                 memcpy(ptr, buf, l);
2747845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
2748582b55a9SAlexander Graf                 break;
2749582b55a9SAlexander Graf             case FLUSH_CACHE:
2750582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2751582b55a9SAlexander Graf                 break;
2752582b55a9SAlexander Graf             }
2753d0ecd2aaSbellard         }
2754d0ecd2aaSbellard         len -= l;
2755d0ecd2aaSbellard         buf += l;
2756d0ecd2aaSbellard         addr += l;
2757d0ecd2aaSbellard     }
275841063e1eSPaolo Bonzini     rcu_read_unlock();
2759d0ecd2aaSbellard }
2760d0ecd2aaSbellard 
2761582b55a9SAlexander Graf /* used for ROM loading: can write in RAM and ROM */
27622a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2763582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2764582b55a9SAlexander Graf {
27652a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2766582b55a9SAlexander Graf }
2767582b55a9SAlexander Graf 
2768582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2769582b55a9SAlexander Graf {
2770582b55a9SAlexander Graf     /*
2771582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2772582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2773582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2774582b55a9SAlexander Graf      * the host's instruction cache at least.
2775582b55a9SAlexander Graf      */
2776582b55a9SAlexander Graf     if (tcg_enabled()) {
2777582b55a9SAlexander Graf         return;
2778582b55a9SAlexander Graf     }
2779582b55a9SAlexander Graf 
27802a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
27812a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2782582b55a9SAlexander Graf }
2783582b55a9SAlexander Graf 
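/*
 * Illustrative sketch (not part of the original file): a board-level
 * firmware loader would pair the two helpers above -- install the blob,
 * then make sure the host instruction cache agrees (a no-op under TCG):
 */
static void install_rom_example(hwaddr base, const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
    cpu_flush_icache_range(base, size);
}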
27846d16c2f8Saliguori typedef struct {
2785d3e71559SPaolo Bonzini     MemoryRegion *mr;
27866d16c2f8Saliguori     void *buffer;
2787a8170e5eSAvi Kivity     hwaddr addr;
2788a8170e5eSAvi Kivity     hwaddr len;
2789c2cba0ffSFam Zheng     bool in_use;
27906d16c2f8Saliguori } BounceBuffer;
27916d16c2f8Saliguori 
27926d16c2f8Saliguori static BounceBuffer bounce;
27936d16c2f8Saliguori 
2794ba223c29Saliguori typedef struct MapClient {
2795e95205e1SFam Zheng     QEMUBH *bh;
279672cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2797ba223c29Saliguori } MapClient;
2798ba223c29Saliguori 
279938e047b5SFam Zheng QemuMutex map_client_list_lock;
280072cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
280172cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2802ba223c29Saliguori 
2803e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2804ba223c29Saliguori {
280572cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
28067267c094SAnthony Liguori     g_free(client);
2807ba223c29Saliguori }
2808ba223c29Saliguori 
280933b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2810ba223c29Saliguori {
2811ba223c29Saliguori     MapClient *client;
2812ba223c29Saliguori 
281372cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
281472cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2815e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2816e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2817ba223c29Saliguori     }
2818ba223c29Saliguori }
2819ba223c29Saliguori 
2820e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2821d0ecd2aaSbellard {
2822d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2823d0ecd2aaSbellard 
282438e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2825e95205e1SFam Zheng     client->bh = bh;
2826d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
282733b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
282833b6c2edSFam Zheng         cpu_notify_map_clients_locked();
282933b6c2edSFam Zheng     }
283038e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2831d0ecd2aaSbellard }
2832d0ecd2aaSbellard 
283338e047b5SFam Zheng void cpu_exec_init_all(void)
283438e047b5SFam Zheng {
283538e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
283638e047b5SFam Zheng     io_mem_init();
2837680a4783SPaolo Bonzini     memory_map_init();
283838e047b5SFam Zheng     qemu_mutex_init(&map_client_list_lock);
283938e047b5SFam Zheng }
284038e047b5SFam Zheng 
2841e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2842d0ecd2aaSbellard {
2843e95205e1SFam Zheng     MapClient *client;
2844d0ecd2aaSbellard 
2845e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2846e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2847e95205e1SFam Zheng         if (client->bh == bh) {
2848e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2849e95205e1SFam Zheng             break;
2850e95205e1SFam Zheng         }
2851e95205e1SFam Zheng     }
2852e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2853d0ecd2aaSbellard }
2854d0ecd2aaSbellard 
2855d0ecd2aaSbellard static void cpu_notify_map_clients(void)
2856d0ecd2aaSbellard {
285738e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
285833b6c2edSFam Zheng     cpu_notify_map_clients_locked();
285938e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
28606d16c2f8Saliguori }
28616d16c2f8Saliguori 
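/*
 * Illustrative sketch (not part of the original file): the map-client
 * protocol above serves DMA code whose address_space_map() call failed
 * because the single bounce buffer was busy.  Assuming a QEMUBH created
 * with qemu_bh_new(), the retry wiring is one call:
 */
static void dma_retry_registration_example(QEMUBH *retry_bh)
{
    /* retry_bh runs once the bounce buffer frees up; registration is
     * one-shot, since cpu_notify_map_clients_locked() unregisters the
     * client right after scheduling it */
    cpu_register_map_client(retry_bh);
}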
286251644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
286351644ab7SPaolo Bonzini {
28645c8a00ceSPaolo Bonzini     MemoryRegion *mr;
286551644ab7SPaolo Bonzini     hwaddr l, xlat;
286651644ab7SPaolo Bonzini 
286741063e1eSPaolo Bonzini     rcu_read_lock();
286851644ab7SPaolo Bonzini     while (len > 0) {
286951644ab7SPaolo Bonzini         l = len;
28705c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
28715c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
28725c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
28735c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
287451644ab7SPaolo Bonzini                 return false;
287551644ab7SPaolo Bonzini             }
287651644ab7SPaolo Bonzini         }
287751644ab7SPaolo Bonzini 
287851644ab7SPaolo Bonzini         len -= l;
287951644ab7SPaolo Bonzini         addr += l;
288051644ab7SPaolo Bonzini     }
288141063e1eSPaolo Bonzini     rcu_read_unlock();
288251644ab7SPaolo Bonzini     return true;
288351644ab7SPaolo Bonzini }
288451644ab7SPaolo Bonzini 
28856d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
28866d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
28876d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
28886d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2889ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2890ba223c29Saliguori  * likely to succeed.
28916d16c2f8Saliguori  */
2892ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2893a8170e5eSAvi Kivity                         hwaddr addr,
2894a8170e5eSAvi Kivity                         hwaddr *plen,
2895ac1970fbSAvi Kivity                         bool is_write)
28966d16c2f8Saliguori {
2897a8170e5eSAvi Kivity     hwaddr len = *plen;
2898e3127ae0SPaolo Bonzini     hwaddr done = 0;
2899e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2900e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2901e81bcda5SPaolo Bonzini     void *ptr;
29026d16c2f8Saliguori 
2903e3127ae0SPaolo Bonzini     if (len == 0) {
2904e3127ae0SPaolo Bonzini         return NULL;
2905e3127ae0SPaolo Bonzini     }
2906e3127ae0SPaolo Bonzini 
29076d16c2f8Saliguori     l = len;
290841063e1eSPaolo Bonzini     rcu_read_lock();
29095c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
291041063e1eSPaolo Bonzini 
29115c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2912c2cba0ffSFam Zheng         if (atomic_xchg(&bounce.in_use, true)) {
291341063e1eSPaolo Bonzini             rcu_read_unlock();
2914e3127ae0SPaolo Bonzini             return NULL;
29156d16c2f8Saliguori         }
2916e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2917e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2918e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
29196d16c2f8Saliguori         bounce.addr = addr;
29206d16c2f8Saliguori         bounce.len = l;
2921d3e71559SPaolo Bonzini 
2922d3e71559SPaolo Bonzini         memory_region_ref(mr);
2923d3e71559SPaolo Bonzini         bounce.mr = mr;
29246d16c2f8Saliguori         if (!is_write) {
29255c9eb028SPeter Maydell             address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
29265c9eb028SPeter Maydell                                bounce.buffer, l);
29276d16c2f8Saliguori         }
292838bee5dcSStefano Stabellini 
292941063e1eSPaolo Bonzini         rcu_read_unlock();
293038bee5dcSStefano Stabellini         *plen = l;
293138bee5dcSStefano Stabellini         return bounce.buffer;
29326d16c2f8Saliguori     }
2933e3127ae0SPaolo Bonzini 
2934e3127ae0SPaolo Bonzini     base = xlat;
2935e3127ae0SPaolo Bonzini 
2936e3127ae0SPaolo Bonzini     for (;;) {
2937e3127ae0SPaolo Bonzini         len -= l;
2938e3127ae0SPaolo Bonzini         addr += l;
2939e3127ae0SPaolo Bonzini         done += l;
2940e3127ae0SPaolo Bonzini         if (len == 0) {
2941e3127ae0SPaolo Bonzini             break;
2942e3127ae0SPaolo Bonzini         }
2943e3127ae0SPaolo Bonzini 
2944e3127ae0SPaolo Bonzini         l = len;
2945e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2946e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2947149f54b5SPaolo Bonzini             break;
2948149f54b5SPaolo Bonzini         }
29498ab934f9SStefano Stabellini     }
29506d16c2f8Saliguori 
2951d3e71559SPaolo Bonzini     memory_region_ref(mr);
2952e3127ae0SPaolo Bonzini     *plen = done;
29530878d0e1SPaolo Bonzini     ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
2954e81bcda5SPaolo Bonzini     rcu_read_unlock();
2955e81bcda5SPaolo Bonzini 
2956e81bcda5SPaolo Bonzini     return ptr;
29576d16c2f8Saliguori }
29586d16c2f8Saliguori 
2959ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
29606d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
29616d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
29626d16c2f8Saliguori  */
2963a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2964a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
29656d16c2f8Saliguori {
29666d16c2f8Saliguori     if (buffer != bounce.buffer) {
2967d3e71559SPaolo Bonzini         MemoryRegion *mr;
29687443b437SPaolo Bonzini         ram_addr_t addr1;
2969d3e71559SPaolo Bonzini 
297007bdaa41SPaolo Bonzini         mr = memory_region_from_host(buffer, &addr1);
29711b5ec234SPaolo Bonzini         assert(mr != NULL);
2972d3e71559SPaolo Bonzini         if (is_write) {
2973845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, access_len);
29746d16c2f8Saliguori         }
2975868bb33fSJan Kiszka         if (xen_enabled()) {
2976e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2977050a0ddfSAnthony PERARD         }
2978d3e71559SPaolo Bonzini         memory_region_unref(mr);
29796d16c2f8Saliguori         return;
29806d16c2f8Saliguori     }
29816d16c2f8Saliguori     if (is_write) {
29825c9eb028SPeter Maydell         address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
29835c9eb028SPeter Maydell                             bounce.buffer, access_len);
29846d16c2f8Saliguori     }
2985f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
29866d16c2f8Saliguori     bounce.buffer = NULL;
2987d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2988c2cba0ffSFam Zheng     atomic_mb_set(&bounce.in_use, false);
2989ba223c29Saliguori     cpu_notify_map_clients();
29906d16c2f8Saliguori }
2991d0ecd2aaSbellard 
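/*
 * Illustrative sketch (not part of the original file): the canonical
 * map/use/unmap sequence for zero-copy access, honouring the "may map
 * a subset" contract documented above:
 */
static void zero_fill_example(AddressSpace *as, hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *p = address_space_map(as, addr, &plen, true);

        if (!p) {
            break;          /* resources exhausted; see the map clients */
        }
        memset(p, 0, plen); /* write through the host pointer */
        address_space_unmap(as, p, plen, true, plen);
        addr += plen;
        len -= plen;
    }
}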
2992a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2993a8170e5eSAvi Kivity                               hwaddr *plen,
2994ac1970fbSAvi Kivity                               int is_write)
2995ac1970fbSAvi Kivity {
2996ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2997ac1970fbSAvi Kivity }
2998ac1970fbSAvi Kivity 
2999a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3000a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
3001ac1970fbSAvi Kivity {
3002ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3003ac1970fbSAvi Kivity }
3004ac1970fbSAvi Kivity 
30058df1cd07Sbellard /* warning: addr must be aligned */
300650013115SPeter Maydell static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
300750013115SPeter Maydell                                                   MemTxAttrs attrs,
300850013115SPeter Maydell                                                   MemTxResult *result,
30091e78bcc1SAlexander Graf                                                   enum device_endian endian)
30108df1cd07Sbellard {
30118df1cd07Sbellard     uint8_t *ptr;
3012791af8c8SPaolo Bonzini     uint64_t val;
30135c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3014149f54b5SPaolo Bonzini     hwaddr l = 4;
3015149f54b5SPaolo Bonzini     hwaddr addr1;
301650013115SPeter Maydell     MemTxResult r;
30174840f10eSJan Kiszka     bool release_lock = false;
30188df1cd07Sbellard 
301941063e1eSPaolo Bonzini     rcu_read_lock();
3020fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
30215c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
30224840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3023125b3806SPaolo Bonzini 
30248df1cd07Sbellard         /* I/O case */
302550013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
30261e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
30271e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
30281e78bcc1SAlexander Graf             val = bswap32(val);
30291e78bcc1SAlexander Graf         }
30301e78bcc1SAlexander Graf #else
30311e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
30321e78bcc1SAlexander Graf             val = bswap32(val);
30331e78bcc1SAlexander Graf         }
30341e78bcc1SAlexander Graf #endif
30358df1cd07Sbellard     } else {
30368df1cd07Sbellard         /* RAM case */
30370878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
30381e78bcc1SAlexander Graf         switch (endian) {
30391e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
30401e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
30411e78bcc1SAlexander Graf             break;
30421e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
30431e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
30441e78bcc1SAlexander Graf             break;
30451e78bcc1SAlexander Graf         default:
30468df1cd07Sbellard             val = ldl_p(ptr);
30471e78bcc1SAlexander Graf             break;
30481e78bcc1SAlexander Graf         }
304950013115SPeter Maydell         r = MEMTX_OK;
305050013115SPeter Maydell     }
305150013115SPeter Maydell     if (result) {
305250013115SPeter Maydell         *result = r;
30538df1cd07Sbellard     }
30544840f10eSJan Kiszka     if (release_lock) {
30554840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
30564840f10eSJan Kiszka     }
305741063e1eSPaolo Bonzini     rcu_read_unlock();
30588df1cd07Sbellard     return val;
30598df1cd07Sbellard }
30608df1cd07Sbellard 
306150013115SPeter Maydell uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
306250013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
306350013115SPeter Maydell {
306450013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
306550013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
306650013115SPeter Maydell }
306750013115SPeter Maydell 
306850013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
306950013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
307050013115SPeter Maydell {
307150013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
307250013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
307350013115SPeter Maydell }
307450013115SPeter Maydell 
307550013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
307650013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
307750013115SPeter Maydell {
307850013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
307950013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
308050013115SPeter Maydell }
308150013115SPeter Maydell 
3082fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
30831e78bcc1SAlexander Graf {
308450013115SPeter Maydell     return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30851e78bcc1SAlexander Graf }
30861e78bcc1SAlexander Graf 
3087fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
30881e78bcc1SAlexander Graf {
308950013115SPeter Maydell     return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30901e78bcc1SAlexander Graf }
30911e78bcc1SAlexander Graf 
3092fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
30931e78bcc1SAlexander Graf {
309450013115SPeter Maydell     return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30951e78bcc1SAlexander Graf }
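
/*
 * Usage sketch (not from this file; the address 0x1000 is illustrative):
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... handle MEMTX_ERROR or MEMTX_DECODE_ERROR ...
 *     }
 *
 * Callers that do not care about the result can pass NULL, which is
 * exactly what the ldl_le_phys() wrapper above does.
 */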
30961e78bcc1SAlexander Graf 
309784b7b8e7Sbellard /* warning: addr must be aligned */
309850013115SPeter Maydell static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
309950013115SPeter Maydell                                                   MemTxAttrs attrs,
310050013115SPeter Maydell                                                   MemTxResult *result,
31011e78bcc1SAlexander Graf                                                   enum device_endian endian)
310284b7b8e7Sbellard {
310384b7b8e7Sbellard     uint8_t *ptr;
310484b7b8e7Sbellard     uint64_t val;
31055c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3106149f54b5SPaolo Bonzini     hwaddr l = 8;
3107149f54b5SPaolo Bonzini     hwaddr addr1;
310850013115SPeter Maydell     MemTxResult r;
31094840f10eSJan Kiszka     bool release_lock = false;
311084b7b8e7Sbellard 
311141063e1eSPaolo Bonzini     rcu_read_lock();
31122c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3113149f54b5SPaolo Bonzini                                  false);
31145c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
31154840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
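        /* prepare_mmio_access() may have taken the global I/O lock on
         * our behalf; release_lock records whether we must drop it
         * again before returning. */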
3116125b3806SPaolo Bonzini 
311784b7b8e7Sbellard         /* I/O case */
311850013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3119968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
3120968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
3121968a5627SPaolo Bonzini             val = bswap64(val);
3122968a5627SPaolo Bonzini         }
3123968a5627SPaolo Bonzini #else
3124968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
3125968a5627SPaolo Bonzini             val = bswap64(val);
3126968a5627SPaolo Bonzini         }
3127968a5627SPaolo Bonzini #endif
312884b7b8e7Sbellard     } else {
312984b7b8e7Sbellard         /* RAM case */
31300878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
31311e78bcc1SAlexander Graf         switch (endian) {
31321e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
31331e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
31341e78bcc1SAlexander Graf             break;
31351e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
31361e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
31371e78bcc1SAlexander Graf             break;
31381e78bcc1SAlexander Graf         default:
313984b7b8e7Sbellard             val = ldq_p(ptr);
31401e78bcc1SAlexander Graf             break;
31411e78bcc1SAlexander Graf         }
314250013115SPeter Maydell         r = MEMTX_OK;
314350013115SPeter Maydell     }
314450013115SPeter Maydell     if (result) {
314550013115SPeter Maydell         *result = r;
314684b7b8e7Sbellard     }
31474840f10eSJan Kiszka     if (release_lock) {
31484840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
31494840f10eSJan Kiszka     }
315041063e1eSPaolo Bonzini     rcu_read_unlock();
315184b7b8e7Sbellard     return val;
315284b7b8e7Sbellard }
315384b7b8e7Sbellard 
315450013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
315550013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
315650013115SPeter Maydell {
315750013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
315850013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
315950013115SPeter Maydell }
316050013115SPeter Maydell 
316150013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
316250013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
316350013115SPeter Maydell {
316450013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
316550013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
316650013115SPeter Maydell }
316750013115SPeter Maydell 
316850013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
316950013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
317050013115SPeter Maydell {
317150013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
317250013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
317350013115SPeter Maydell }
317450013115SPeter Maydell 
31752c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
31761e78bcc1SAlexander Graf {
317750013115SPeter Maydell     return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31781e78bcc1SAlexander Graf }
31791e78bcc1SAlexander Graf 
31802c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
31811e78bcc1SAlexander Graf {
318250013115SPeter Maydell     return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31831e78bcc1SAlexander Graf }
31841e78bcc1SAlexander Graf 
31852c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
31861e78bcc1SAlexander Graf {
318750013115SPeter Maydell     return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31881e78bcc1SAlexander Graf }
31891e78bcc1SAlexander Graf 
3190aab33094Sbellard /* XXX: optimize */
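/* A single byte has no endianness, so there are no _le/_be variants and
 * the access simply goes through the generic address_space_rw() path. */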
319150013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
319250013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
3193aab33094Sbellard {
3194aab33094Sbellard     uint8_t val;
319550013115SPeter Maydell     MemTxResult r;
319650013115SPeter Maydell 
319750013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
319850013115SPeter Maydell     if (result) {
319950013115SPeter Maydell         *result = r;
320050013115SPeter Maydell     }
3201aab33094Sbellard     return val;
3202aab33094Sbellard }
3203aab33094Sbellard 
320450013115SPeter Maydell uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
320550013115SPeter Maydell {
320650013115SPeter Maydell     return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
320750013115SPeter Maydell }
320850013115SPeter Maydell 
3209733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
321050013115SPeter Maydell static inline uint32_t address_space_lduw_internal(AddressSpace *as,
321150013115SPeter Maydell                                                    hwaddr addr,
321250013115SPeter Maydell                                                    MemTxAttrs attrs,
321350013115SPeter Maydell                                                    MemTxResult *result,
32141e78bcc1SAlexander Graf                                                    enum device_endian endian)
3215aab33094Sbellard {
3216733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3217733f0b02SMichael S. Tsirkin     uint64_t val;
32185c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3219149f54b5SPaolo Bonzini     hwaddr l = 2;
3220149f54b5SPaolo Bonzini     hwaddr addr1;
322150013115SPeter Maydell     MemTxResult r;
32224840f10eSJan Kiszka     bool release_lock = false;
3223733f0b02SMichael S. Tsirkin 
322441063e1eSPaolo Bonzini     rcu_read_lock();
322541701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3226149f54b5SPaolo Bonzini                                  false);
32275c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
32284840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3229125b3806SPaolo Bonzini 
3230733f0b02SMichael S. Tsirkin         /* I/O case */
323150013115SPeter Maydell         r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
32321e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
32331e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
32341e78bcc1SAlexander Graf             val = bswap16(val);
32351e78bcc1SAlexander Graf         }
32361e78bcc1SAlexander Graf #else
32371e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
32381e78bcc1SAlexander Graf             val = bswap16(val);
32391e78bcc1SAlexander Graf         }
32401e78bcc1SAlexander Graf #endif
3241733f0b02SMichael S. Tsirkin     } else {
3242733f0b02SMichael S. Tsirkin         /* RAM case */
32430878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
32441e78bcc1SAlexander Graf         switch (endian) {
32451e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
32461e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
32471e78bcc1SAlexander Graf             break;
32481e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
32491e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
32501e78bcc1SAlexander Graf             break;
32511e78bcc1SAlexander Graf         default:
3252733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
32531e78bcc1SAlexander Graf             break;
32541e78bcc1SAlexander Graf         }
325550013115SPeter Maydell         r = MEMTX_OK;
325650013115SPeter Maydell     }
325750013115SPeter Maydell     if (result) {
325850013115SPeter Maydell         *result = r;
3259733f0b02SMichael S. Tsirkin     }
32604840f10eSJan Kiszka     if (release_lock) {
32614840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
32624840f10eSJan Kiszka     }
326341063e1eSPaolo Bonzini     rcu_read_unlock();
3264733f0b02SMichael S. Tsirkin     return val;
3265aab33094Sbellard }
3266aab33094Sbellard 
326750013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
326850013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
326950013115SPeter Maydell {
327050013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
327150013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
327250013115SPeter Maydell }
327350013115SPeter Maydell 
327450013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
327550013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
327650013115SPeter Maydell {
327750013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
327850013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
327950013115SPeter Maydell }
328050013115SPeter Maydell 
328150013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
328250013115SPeter Maydell                                MemTxAttrs attrs, MemTxResult *result)
328350013115SPeter Maydell {
328450013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
328550013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
328650013115SPeter Maydell }
328750013115SPeter Maydell 
328841701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
32891e78bcc1SAlexander Graf {
329050013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32911e78bcc1SAlexander Graf }
32921e78bcc1SAlexander Graf 
329341701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
32941e78bcc1SAlexander Graf {
329550013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32961e78bcc1SAlexander Graf }
32971e78bcc1SAlexander Graf 
329841701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
32991e78bcc1SAlexander Graf {
330050013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
33011e78bcc1SAlexander Graf }
33021e78bcc1SAlexander Graf 
33038df1cd07Sbellard /* warning: addr must be aligned.  The RAM page is not marked as dirty
33048df1cd07Sbellard    for code invalidation, so translated code on it is not invalidated.
33058df1cd07Sbellard    This is useful when the dirty bits are used to track modified PTEs */
330650013115SPeter Maydell void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
330750013115SPeter Maydell                                 MemTxAttrs attrs, MemTxResult *result)
33088df1cd07Sbellard {
33098df1cd07Sbellard     uint8_t *ptr;
33105c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3311149f54b5SPaolo Bonzini     hwaddr l = 4;
3312149f54b5SPaolo Bonzini     hwaddr addr1;
331350013115SPeter Maydell     MemTxResult r;
3314845b6214SPaolo Bonzini     uint8_t dirty_log_mask;
33154840f10eSJan Kiszka     bool release_lock = false;
33168df1cd07Sbellard 
331741063e1eSPaolo Bonzini     rcu_read_lock();
33182198a121SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3319149f54b5SPaolo Bonzini                                  true);
33205c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
33214840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3322125b3806SPaolo Bonzini 
332350013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33248df1cd07Sbellard     } else {
33250878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
33268df1cd07Sbellard         stl_p(ptr, val);
332774576198Saliguori 
3328845b6214SPaolo Bonzini         dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3329845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
33300878d0e1SPaolo Bonzini         cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
33310878d0e1SPaolo Bonzini                                             4, dirty_log_mask);
333250013115SPeter Maydell         r = MEMTX_OK;
333350013115SPeter Maydell     }
333450013115SPeter Maydell     if (result) {
333550013115SPeter Maydell         *result = r;
33368df1cd07Sbellard     }
33374840f10eSJan Kiszka     if (release_lock) {
33384840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
33394840f10eSJan Kiszka     }
334041063e1eSPaolo Bonzini     rcu_read_unlock();
33418df1cd07Sbellard }
33428df1cd07Sbellard 
334350013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
334450013115SPeter Maydell {
334550013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
334650013115SPeter Maydell }
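
/*
 * Note (not in the original comments): the "notdirty" store does still
 * record the write for the migration and VGA dirty bitmaps; what it
 * skips is DIRTY_MEMORY_CODE, i.e. no translated code is invalidated.
 * Target MMU code updating accessed/dirty bits in guest PTEs is the
 * intended user.
 */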
334750013115SPeter Maydell 
33488df1cd07Sbellard /* warning: addr must be aligned */
334950013115SPeter Maydell static inline void address_space_stl_internal(AddressSpace *as,
3350ab1da857SEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
335150013115SPeter Maydell                                               MemTxAttrs attrs,
335250013115SPeter Maydell                                               MemTxResult *result,
33531e78bcc1SAlexander Graf                                               enum device_endian endian)
33548df1cd07Sbellard {
33558df1cd07Sbellard     uint8_t *ptr;
33565c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3357149f54b5SPaolo Bonzini     hwaddr l = 4;
3358149f54b5SPaolo Bonzini     hwaddr addr1;
335950013115SPeter Maydell     MemTxResult r;
33604840f10eSJan Kiszka     bool release_lock = false;
33618df1cd07Sbellard 
336241063e1eSPaolo Bonzini     rcu_read_lock();
3363ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
3364149f54b5SPaolo Bonzini                                  true);
33655c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
33664840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3367125b3806SPaolo Bonzini 
33681e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
33691e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
33701e78bcc1SAlexander Graf             val = bswap32(val);
33711e78bcc1SAlexander Graf         }
33721e78bcc1SAlexander Graf #else
33731e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
33741e78bcc1SAlexander Graf             val = bswap32(val);
33751e78bcc1SAlexander Graf         }
33761e78bcc1SAlexander Graf #endif
337750013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
33788df1cd07Sbellard     } else {
33798df1cd07Sbellard         /* RAM case */
33800878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
33811e78bcc1SAlexander Graf         switch (endian) {
33821e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
33831e78bcc1SAlexander Graf             stl_le_p(ptr, val);
33841e78bcc1SAlexander Graf             break;
33851e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
33861e78bcc1SAlexander Graf             stl_be_p(ptr, val);
33871e78bcc1SAlexander Graf             break;
33881e78bcc1SAlexander Graf         default:
33898df1cd07Sbellard             stl_p(ptr, val);
33901e78bcc1SAlexander Graf             break;
33911e78bcc1SAlexander Graf         }
3392845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 4);
339350013115SPeter Maydell         r = MEMTX_OK;
33948df1cd07Sbellard     }
339550013115SPeter Maydell     if (result) {
339650013115SPeter Maydell         *result = r;
339750013115SPeter Maydell     }
33984840f10eSJan Kiszka     if (release_lock) {
33994840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
34004840f10eSJan Kiszka     }
340141063e1eSPaolo Bonzini     rcu_read_unlock();
340250013115SPeter Maydell }
340350013115SPeter Maydell 
340450013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
340550013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
340650013115SPeter Maydell {
340750013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
340850013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
340950013115SPeter Maydell }
341050013115SPeter Maydell 
341150013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
341250013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
341350013115SPeter Maydell {
341450013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
341550013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
341650013115SPeter Maydell }
341750013115SPeter Maydell 
341850013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
341950013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
342050013115SPeter Maydell {
342150013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
342250013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
34233a7d929eSbellard }
34248df1cd07Sbellard 
3425ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34261e78bcc1SAlexander Graf {
342750013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34281e78bcc1SAlexander Graf }
34291e78bcc1SAlexander Graf 
3430ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34311e78bcc1SAlexander Graf {
343250013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34331e78bcc1SAlexander Graf }
34341e78bcc1SAlexander Graf 
3435ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34361e78bcc1SAlexander Graf {
343750013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34381e78bcc1SAlexander Graf }
34391e78bcc1SAlexander Graf 
3440aab33094Sbellard /* XXX: optimize */
344150013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
344250013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3443aab33094Sbellard {
3444aab33094Sbellard     uint8_t v = val;
344550013115SPeter Maydell     MemTxResult r;
344650013115SPeter Maydell 
344750013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
344850013115SPeter Maydell     if (result) {
344950013115SPeter Maydell         *result = r;
345050013115SPeter Maydell     }
345150013115SPeter Maydell }
345250013115SPeter Maydell 
345350013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
345450013115SPeter Maydell {
345550013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3456aab33094Sbellard }
3457aab33094Sbellard 
3458733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
345950013115SPeter Maydell static inline void address_space_stw_internal(AddressSpace *as,
34605ce5944dSEdgar E. Iglesias                                               hwaddr addr, uint32_t val,
346150013115SPeter Maydell                                               MemTxAttrs attrs,
346250013115SPeter Maydell                                               MemTxResult *result,
34631e78bcc1SAlexander Graf                                               enum device_endian endian)
3464aab33094Sbellard {
3465733f0b02SMichael S. Tsirkin     uint8_t *ptr;
34665c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3467149f54b5SPaolo Bonzini     hwaddr l = 2;
3468149f54b5SPaolo Bonzini     hwaddr addr1;
346950013115SPeter Maydell     MemTxResult r;
34704840f10eSJan Kiszka     bool release_lock = false;
3471733f0b02SMichael S. Tsirkin 
347241063e1eSPaolo Bonzini     rcu_read_lock();
34735ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
34745c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
34754840f10eSJan Kiszka         release_lock |= prepare_mmio_access(mr);
3476125b3806SPaolo Bonzini 
34771e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
34781e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
34791e78bcc1SAlexander Graf             val = bswap16(val);
34801e78bcc1SAlexander Graf         }
34811e78bcc1SAlexander Graf #else
34821e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
34831e78bcc1SAlexander Graf             val = bswap16(val);
34841e78bcc1SAlexander Graf         }
34851e78bcc1SAlexander Graf #endif
348650013115SPeter Maydell         r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3487733f0b02SMichael S. Tsirkin     } else {
3488733f0b02SMichael S. Tsirkin         /* RAM case */
34890878d0e1SPaolo Bonzini         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
34901e78bcc1SAlexander Graf         switch (endian) {
34911e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
34921e78bcc1SAlexander Graf             stw_le_p(ptr, val);
34931e78bcc1SAlexander Graf             break;
34941e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
34951e78bcc1SAlexander Graf             stw_be_p(ptr, val);
34961e78bcc1SAlexander Graf             break;
34971e78bcc1SAlexander Graf         default:
3498733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
34991e78bcc1SAlexander Graf             break;
35001e78bcc1SAlexander Graf         }
3501845b6214SPaolo Bonzini         invalidate_and_set_dirty(mr, addr1, 2);
350250013115SPeter Maydell         r = MEMTX_OK;
3503733f0b02SMichael S. Tsirkin     }
350450013115SPeter Maydell     if (result) {
350550013115SPeter Maydell         *result = r;
350650013115SPeter Maydell     }
35074840f10eSJan Kiszka     if (release_lock) {
35084840f10eSJan Kiszka         qemu_mutex_unlock_iothread();
35094840f10eSJan Kiszka     }
351041063e1eSPaolo Bonzini     rcu_read_unlock();
351150013115SPeter Maydell }
351250013115SPeter Maydell 
351350013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
351450013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
351550013115SPeter Maydell {
351650013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
351750013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
351850013115SPeter Maydell }
351950013115SPeter Maydell 
352050013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
352150013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
352250013115SPeter Maydell {
352350013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
352450013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
352550013115SPeter Maydell }
352650013115SPeter Maydell 
352750013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
352850013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
352950013115SPeter Maydell {
353050013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
353150013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3532aab33094Sbellard }
3533aab33094Sbellard 
35345ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35351e78bcc1SAlexander Graf {
353650013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35371e78bcc1SAlexander Graf }
35381e78bcc1SAlexander Graf 
35395ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35401e78bcc1SAlexander Graf {
354150013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35421e78bcc1SAlexander Graf }
35431e78bcc1SAlexander Graf 
35445ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35451e78bcc1SAlexander Graf {
354650013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35471e78bcc1SAlexander Graf }
35481e78bcc1SAlexander Graf 
3549aab33094Sbellard /* XXX: optimize */
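/* Unlike the narrower helpers above, the 8-byte stores have no direct
 * RAM fast path here: the value is byte-swapped into the desired order
 * up front and then pushed through address_space_rw(). */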
355050013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
355150013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
355250013115SPeter Maydell {
355350013115SPeter Maydell     MemTxResult r;
355450013115SPeter Maydell     val = tswap64(val);
355550013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
355650013115SPeter Maydell     if (result) {
355750013115SPeter Maydell         *result = r;
355850013115SPeter Maydell     }
355950013115SPeter Maydell }
356050013115SPeter Maydell 
356150013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
356250013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
356350013115SPeter Maydell {
356450013115SPeter Maydell     MemTxResult r;
356550013115SPeter Maydell     val = cpu_to_le64(val);
356650013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
356750013115SPeter Maydell     if (result) {
356850013115SPeter Maydell         *result = r;
356950013115SPeter Maydell     }
357050013115SPeter Maydell }
357050013115SPeter Maydell 
357150013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
357250013115SPeter Maydell                           MemTxAttrs attrs, MemTxResult *result)
357350013115SPeter Maydell {
357450013115SPeter Maydell     MemTxResult r;
357550013115SPeter Maydell     val = cpu_to_be64(val);
357650013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
357750013115SPeter Maydell     if (result) {
357850013115SPeter Maydell         *result = r;
357950013115SPeter Maydell     }
358050013115SPeter Maydell }
358150013115SPeter Maydell 
3582f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3583aab33094Sbellard {
358450013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3585aab33094Sbellard }
3586aab33094Sbellard 
3587f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35881e78bcc1SAlexander Graf {
358950013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35901e78bcc1SAlexander Graf }
35911e78bcc1SAlexander Graf 
3592f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
35931e78bcc1SAlexander Graf {
359450013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35951e78bcc1SAlexander Graf }
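
/*
 * Usage sketch (desc_addr and status are illustrative names, not from
 * this file): a device model writing a 64-bit big-endian status word
 * back into guest memory, discarding the transaction result:
 *
 *     address_space_stq_be(&address_space_memory, desc_addr, status,
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 */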
35961e78bcc1SAlexander Graf 
35975e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
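/* Each page in the range is translated with the CPU's debug hook,
 * cpu_get_phys_page_attrs_debug(), and accessed through the address
 * space selected by the returned attributes.  Writes go through
 * cpu_physical_memory_write_rom() so that ROM-backed pages can be
 * patched as well, e.g. for gdbstub breakpoints. */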
3598f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3599b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
360013eb76e0Sbellard {
360113eb76e0Sbellard     int l;
3602a8170e5eSAvi Kivity     hwaddr phys_addr;
36039b3c35e0Sj_mayer     target_ulong page;
360413eb76e0Sbellard 
360513eb76e0Sbellard     while (len > 0) {
36065232e4c7SPeter Maydell         int asidx;
36075232e4c7SPeter Maydell         MemTxAttrs attrs;
36085232e4c7SPeter Maydell 
360913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
36105232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
36115232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
361213eb76e0Sbellard         /* if no physical page mapped, return an error */
361313eb76e0Sbellard         if (phys_addr == -1) {
361413eb76e0Sbellard             return -1;
361413eb76e0Sbellard         }
361513eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
361613eb76e0Sbellard         if (l > len) {
361713eb76e0Sbellard             l = len;
361713eb76e0Sbellard         }
36185e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
36192e38847bSEdgar E. Iglesias         if (is_write) {
36205232e4c7SPeter Maydell             cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
36215232e4c7SPeter Maydell                                           phys_addr, buf, l);
36222e38847bSEdgar E. Iglesias         } else {
36235232e4c7SPeter Maydell             address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
36245232e4c7SPeter Maydell                              MEMTXATTRS_UNSPECIFIED,
36255c9eb028SPeter Maydell                              buf, l, 0);
36262e38847bSEdgar E. Iglesias         }
362713eb76e0Sbellard         len -= l;
362813eb76e0Sbellard         buf += l;
362913eb76e0Sbellard         addr += l;
363013eb76e0Sbellard     }
363113eb76e0Sbellard     return 0;
363213eb76e0Sbellard }
3633038629a6SDr. David Alan Gilbert 
3634038629a6SDr. David Alan Gilbert /*
3635038629a6SDr. David Alan Gilbert  * Allows code that needs to deal with migration bitmaps etc. to still be
3636038629a6SDr. David Alan Gilbert  * built target-independent.
3637038629a6SDr. David Alan Gilbert  */
3638038629a6SDr. David Alan Gilbert size_t qemu_target_page_bits(void)
3639038629a6SDr. David Alan Gilbert {
3640038629a6SDr. David Alan Gilbert     return TARGET_PAGE_BITS;
3641038629a6SDr. David Alan Gilbert }
3642038629a6SDr. David Alan Gilbert 
3643a68fe89cSPaul Brook #endif
364413eb76e0Sbellard 
36458e4a424bSBlue Swirl /*
36468e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find
36478e4a424bSBlue Swirl  * out if it's running on a big-endian machine. Don't do this at home, kids!
36488e4a424bSBlue Swirl  */
364998ed8ecfSGreg Kurz bool target_words_bigendian(void);
365098ed8ecfSGreg Kurz bool target_words_bigendian(void)
36518e4a424bSBlue Swirl {
36528e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
36538e4a424bSBlue Swirl     return true;
36548e4a424bSBlue Swirl #else
36558e4a424bSBlue Swirl     return false;
36568e4a424bSBlue Swirl #endif
36578e4a424bSBlue Swirl }
36588e4a424bSBlue Swirl 
365976f35538SWen Congyang #ifndef CONFIG_USER_ONLY
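/* Returns true if an access to this physical address would be MMIO,
 * i.e. the address is backed by neither RAM nor a ROM device. */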
3660a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
366176f35538SWen Congyang {
36625c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3663149f54b5SPaolo Bonzini     hwaddr l = 1;
366441063e1eSPaolo Bonzini     bool res;
366576f35538SWen Congyang 
366641063e1eSPaolo Bonzini     rcu_read_lock();
36675c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3668149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
366976f35538SWen Congyang 
367041063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
367141063e1eSPaolo Bonzini     rcu_read_unlock();
367241063e1eSPaolo Bonzini     return res;
367376f35538SWen Congyang }
3674bd2fa51fSMichael R. Hines 
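/* Walk all RAM blocks under the RCU read lock, calling func for each;
 * a non-zero return value from func aborts the walk and is propagated
 * back to the caller. */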
3675e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3676bd2fa51fSMichael R. Hines {
3677bd2fa51fSMichael R. Hines     RAMBlock *block;
3678e3807054SDr. David Alan Gilbert     int ret = 0;
3679bd2fa51fSMichael R. Hines 
36800dc3f44aSMike Day     rcu_read_lock();
36810dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3682e3807054SDr. David Alan Gilbert         ret = func(block->idstr, block->host, block->offset,
3683e3807054SDr. David Alan Gilbert                    block->used_length, opaque);
3684e3807054SDr. David Alan Gilbert         if (ret) {
3685e3807054SDr. David Alan Gilbert             break;
3686e3807054SDr. David Alan Gilbert         }
3687bd2fa51fSMichael R. Hines     }
36880dc3f44aSMike Day     rcu_read_unlock();
3689e3807054SDr. David Alan Gilbert     return ret;
3690bd2fa51fSMichael R. Hines }
3691ec3f8c99SPeter Maydell #endif
3692