xref: /qemu/system/physmem.c (revision 49dfcec40349245ad365964468b67e132c3cedc7)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20777872e5SStefan Weil #ifndef _WIN32
21a98d49b1Sbellard #include <sys/types.h>
22d5a8f07cSbellard #include <sys/mman.h>
23d5a8f07cSbellard #endif
2454936004Sbellard 
25055403b2SStefan Weil #include "qemu-common.h"
266180a181Sbellard #include "cpu.h"
27b67d9a52Sbellard #include "tcg.h"
28b3c7724cSpbrook #include "hw/hw.h"
294485bd26SMichael S. Tsirkin #if !defined(CONFIG_USER_ONLY)
3047c8ca53SMarcel Apfelbaum #include "hw/boards.h"
314485bd26SMichael S. Tsirkin #endif
32cc9e98cbSAlex Williamson #include "hw/qdev.h"
331de7afc9SPaolo Bonzini #include "qemu/osdep.h"
349c17d615SPaolo Bonzini #include "sysemu/kvm.h"
352ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
360d09e41aSPaolo Bonzini #include "hw/xen/xen.h"
371de7afc9SPaolo Bonzini #include "qemu/timer.h"
381de7afc9SPaolo Bonzini #include "qemu/config-file.h"
3975a34036SAndreas Färber #include "qemu/error-report.h"
40022c62cbSPaolo Bonzini #include "exec/memory.h"
419c17d615SPaolo Bonzini #include "sysemu/dma.h"
42022c62cbSPaolo Bonzini #include "exec/address-spaces.h"
4353a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4453a5960aSpbrook #include <qemu.h>
45432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
469c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
476506e4f9SStefano Stabellini #include "trace.h"
4853a5960aSpbrook #endif
490d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
500dc3f44aSMike Day #include "qemu/rcu_queue.h"
51022c62cbSPaolo Bonzini #include "exec/cputlb.h"
525b6dd868SBlue Swirl #include "translate-all.h"
530cac1b66SBlue Swirl 
54022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
55220c3ebdSJuan Quintela #include "exec/ram_addr.h"
5667d95c15SAvi Kivity 
57b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
58b35ba30fSMichael S. Tsirkin 
59db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
601196be37Sths 
6199773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
62981fdf23SJuan Quintela static bool in_migration;
6394a6b54fSpbrook 
640dc3f44aSMike Day /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
650dc3f44aSMike Day  * are protected by the ramlist lock.
660dc3f44aSMike Day  */
670d53d9feSMike Day RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
6862152b8aSAvi Kivity 
6962152b8aSAvi Kivity static MemoryRegion *system_memory;
70309cb471SAvi Kivity static MemoryRegion *system_io;
7162152b8aSAvi Kivity 
72f6790af6SAvi Kivity AddressSpace address_space_io;
73f6790af6SAvi Kivity AddressSpace address_space_memory;
742673a5daSAvi Kivity 
750844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
76acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
770e0df1e2SAvi Kivity 
787bd4f430SPaolo Bonzini /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
797bd4f430SPaolo Bonzini #define RAM_PREALLOC   (1 << 0)
807bd4f430SPaolo Bonzini 
81dbcb8981SPaolo Bonzini /* RAM is mmap-ed with MAP_SHARED */
82dbcb8981SPaolo Bonzini #define RAM_SHARED     (1 << 1)
83dbcb8981SPaolo Bonzini 
8462be4e3aSMichael S. Tsirkin /* Only a portion of RAM (used_length) is actually used, and migrated.
8562be4e3aSMichael S. Tsirkin  * This used_length size can change across reboots.
8662be4e3aSMichael S. Tsirkin  */
8762be4e3aSMichael S. Tsirkin #define RAM_RESIZEABLE (1 << 2)
8862be4e3aSMichael S. Tsirkin 
89e2eef170Spbrook #endif
909fa3e853Sbellard 
91bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
926a00d601Sbellard /* current CPU in the current thread. It is only valid inside
936a00d601Sbellard    cpu_exec() */
944917cf44SAndreas Färber DEFINE_TLS(CPUState *, current_cpu);
952e70f6efSpbrook /* 0 = Do not count executed instructions.
96bf20dc07Sths    1 = Precise instruction counting.
972e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
985708fc66SPaolo Bonzini int use_icount;
996a00d601Sbellard 
100e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1014346ae3eSAvi Kivity 
typedef struct PhysPageEntry PhysPageEntry;

/* One entry of the physical-address-space radix tree, packed into 32 bits
 * so that a whole node (P_L2_SIZE entries) stays compact. */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
1101db8abb1SPaolo Bonzini 
1118b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
1128b795765SMichael S. Tsirkin 
11303f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
11457271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
11503f49957SPaolo Bonzini 
116026736ceSMichael S. Tsirkin #define P_L2_BITS 9
11703f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
11803f49957SPaolo Bonzini 
11903f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
12003f49957SPaolo Bonzini 
12103f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1220475d94fSPaolo Bonzini 
/* Backing storage for one dispatch radix tree: a growable pool of nodes
 * plus the table of MemoryRegionSections that leaf entries index into.
 * Freed via call_rcu, hence the embedded rcu_head. */
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;        /* sections in use */
    unsigned sections_nb_alloc;  /* capacity of the sections array */
    unsigned nodes_nb;           /* nodes in use */
    unsigned nodes_nb_alloc;     /* capacity of the nodes array */
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
13353cb28cbSMarcel Apfelbaum 
/* Per-AddressSpace dispatch state, read under RCU and replaced wholesale
 * on topology changes. */
struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
1441db8abb1SPaolo Bonzini 
/* Byte offset of @addr within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Dispatch helper for a page that is split among several sections:
 * sub_section maps each byte offset within the page (see SUBPAGE_IDX)
 * to a section index. */
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
15290260c6cSJan Kiszka 
153b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
154b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
155b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
156b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1575312bd8bSAvi Kivity 
158e2eef170Spbrook static void io_mem_init(void);
15962152b8aSAvi Kivity static void memory_map_init(void);
16009daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
161e2eef170Spbrook 
1621ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
1636658ffb8Spbrook #endif
16454936004Sbellard 
1656d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
166d6f2ea22SAvi Kivity 
16753cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
168f7bf5461SAvi Kivity {
16953cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
17053cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
17153cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
17253cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
173f7bf5461SAvi Kivity     }
174f7bf5461SAvi Kivity }
175f7bf5461SAvi Kivity 
176db94604bSPaolo Bonzini static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
177d6f2ea22SAvi Kivity {
178d6f2ea22SAvi Kivity     unsigned i;
1798b795765SMichael S. Tsirkin     uint32_t ret;
180db94604bSPaolo Bonzini     PhysPageEntry e;
181db94604bSPaolo Bonzini     PhysPageEntry *p;
182d6f2ea22SAvi Kivity 
18353cb28cbSMarcel Apfelbaum     ret = map->nodes_nb++;
184db94604bSPaolo Bonzini     p = map->nodes[ret];
185d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
18653cb28cbSMarcel Apfelbaum     assert(ret != map->nodes_nb_alloc);
187db94604bSPaolo Bonzini 
188db94604bSPaolo Bonzini     e.skip = leaf ? 0 : 1;
189db94604bSPaolo Bonzini     e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
19003f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
191db94604bSPaolo Bonzini         memcpy(&p[i], &e, sizeof(e));
192d6f2ea22SAvi Kivity     }
193f7bf5461SAvi Kivity     return ret;
194d6f2ea22SAvi Kivity }
195d6f2ea22SAvi Kivity 
/* Recursively fill [*index, *index + *nb) pages of the tree entry @lp with
 * leaf value @leaf.  @level counts down from P_L2_LEVELS - 1 to 0; each
 * level resolves P_L2_BITS bits of the page index.  *index and *nb are
 * advanced as pages are consumed, so iteration state is shared with the
 * caller across the recursion. */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    /* Number of pages covered by one entry at this level. */
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    /* Allocate the child node on first touch. */
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* The range covers this entry's whole subtree: store the leaf
             * here directly instead of descending further. */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
2215cd2c5b6SRichard Henderson 
222ac1970fbSAvi Kivity static void phys_page_set(AddressSpaceDispatch *d,
223a8170e5eSAvi Kivity                           hwaddr index, hwaddr nb,
2242999097bSAvi Kivity                           uint16_t leaf)
225f7bf5461SAvi Kivity {
2262999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
22753cb28cbSMarcel Apfelbaum     phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
228f7bf5461SAvi Kivity 
22953cb28cbSMarcel Apfelbaum     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
23092e873b9Sbellard }
23192e873b9Sbellard 
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    /* NOTE(review): @compacted is never read or written here or in the
     * recursive calls — it appears to be a leftover parameter; confirm
     * against callers before removing (phys_page_compact_all allocates a
     * bitmap just to pass it in). */
    unsigned valid_ptr = P_L2_SIZE;  /* index of the last non-NIL child */
    int valid = 0;                   /* count of non-NIL children */
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            /* Compact the subtree first so its skip count is final. */
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    /* (The bound is conservative: 8 levels, below the 6-bit field max.) */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
284b35ba30fSMichael S. Tsirkin 
285b35ba30fSMichael S. Tsirkin static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
286b35ba30fSMichael S. Tsirkin {
287b35ba30fSMichael S. Tsirkin     DECLARE_BITMAP(compacted, nodes_nb);
288b35ba30fSMichael S. Tsirkin 
289b35ba30fSMichael S. Tsirkin     if (d->phys_map.skip) {
29053cb28cbSMarcel Apfelbaum         phys_page_compact(&d->phys_map, d->map.nodes, compacted);
291b35ba30fSMichael S. Tsirkin     }
292b35ba30fSMichael S. Tsirkin }
293b35ba30fSMichael S. Tsirkin 
/* Walk the radix tree rooted at @lp and return the MemoryRegionSection
 * covering @addr, or the unassigned section when nothing is mapped.
 * Compressed levels are honoured: lp.skip says how many levels an entry
 * spans, so the level counter decreases by skip each step; a leaf has
 * skip == 0 and terminates the walk. */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* With level compression a leaf can be reached via an address it does
     * not actually cover; verify the section contains @addr (a size with
     * the high 64 bits set always covers). */
    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
317f3705d53SAvi Kivity 
318e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
319e5548617SBlue Swirl {
3202a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
321e5548617SBlue Swirl         && mr != &io_mem_watch;
322e5548617SBlue Swirl }
323149f54b5SPaolo Bonzini 
32479e2b9aeSPaolo Bonzini /* Called from RCU critical section */
325c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
32690260c6cSJan Kiszka                                                         hwaddr addr,
32790260c6cSJan Kiszka                                                         bool resolve_subpage)
3289f029603SJan Kiszka {
32990260c6cSJan Kiszka     MemoryRegionSection *section;
33090260c6cSJan Kiszka     subpage_t *subpage;
33190260c6cSJan Kiszka 
33253cb28cbSMarcel Apfelbaum     section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
33390260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
33490260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
33553cb28cbSMarcel Apfelbaum         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
33690260c6cSJan Kiszka     }
33790260c6cSJan Kiszka     return section;
3389f029603SJan Kiszka }
3399f029603SJan Kiszka 
34079e2b9aeSPaolo Bonzini /* Called from RCU critical section */
34190260c6cSJan Kiszka static MemoryRegionSection *
342c7086b4aSPaolo Bonzini address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
34390260c6cSJan Kiszka                                  hwaddr *plen, bool resolve_subpage)
344149f54b5SPaolo Bonzini {
345149f54b5SPaolo Bonzini     MemoryRegionSection *section;
346a87f3954SPaolo Bonzini     Int128 diff;
347149f54b5SPaolo Bonzini 
348c7086b4aSPaolo Bonzini     section = address_space_lookup_region(d, addr, resolve_subpage);
349149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegionSection */
350149f54b5SPaolo Bonzini     addr -= section->offset_within_address_space;
351149f54b5SPaolo Bonzini 
352149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegion */
353149f54b5SPaolo Bonzini     *xlat = addr + section->offset_within_region;
354149f54b5SPaolo Bonzini 
355149f54b5SPaolo Bonzini     diff = int128_sub(section->mr->size, int128_make64(addr));
3563752a036SPeter Maydell     *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
357149f54b5SPaolo Bonzini     return section;
358149f54b5SPaolo Bonzini }
35990260c6cSJan Kiszka 
360a87f3954SPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
361a87f3954SPaolo Bonzini {
362a87f3954SPaolo Bonzini     if (memory_region_is_ram(mr)) {
363a87f3954SPaolo Bonzini         return !(is_write && mr->readonly);
364a87f3954SPaolo Bonzini     }
365a87f3954SPaolo Bonzini     if (memory_region_is_romd(mr)) {
366a87f3954SPaolo Bonzini         return !is_write;
367a87f3954SPaolo Bonzini     }
368a87f3954SPaolo Bonzini 
369a87f3954SPaolo Bonzini     return false;
370a87f3954SPaolo Bonzini }
371a87f3954SPaolo Bonzini 
/* Called from RCU critical section */
/* Translate @addr in @as to a terminal MemoryRegion and offset (*xlat),
 * following IOMMU indirections across address spaces.  *plen is clamped
 * to the contiguous span covered by the translation; on an IOMMU
 * permission fault the unassigned region is returned.  Under Xen, direct
 * RAM accesses are additionally clamped to one page. */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        /* The dispatch pointer is replaced wholesale on updates; read it
         * once per iteration under RCU. */
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        /* Combine the translated page with the offset bits kept by the
         * IOMMU's address mask. */
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        /* Continue the walk in the IOMMU's target address space. */
        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
41090260c6cSJan Kiszka 
41179e2b9aeSPaolo Bonzini /* Called from RCU critical section */
41290260c6cSJan Kiszka MemoryRegionSection *
4139d82b5a7SPaolo Bonzini address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
4149d82b5a7SPaolo Bonzini                                   hwaddr *xlat, hwaddr *plen)
41590260c6cSJan Kiszka {
41630951157SAvi Kivity     MemoryRegionSection *section;
4179d82b5a7SPaolo Bonzini     section = address_space_translate_internal(cpu->memory_dispatch,
4189d82b5a7SPaolo Bonzini                                                addr, xlat, plen, false);
41930951157SAvi Kivity 
42030951157SAvi Kivity     assert(!section->mr->iommu_ops);
42130951157SAvi Kivity     return section;
42290260c6cSJan Kiszka }
4239fa3e853Sbellard #endif
424fd6ce8f6Sbellard 
425b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4269656f324Spbrook 
427e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
428e7f4eff7SJuan Quintela {
429259186a7SAndreas Färber     CPUState *cpu = opaque;
430e7f4eff7SJuan Quintela 
4313098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
4323098dba0Saurel32        version_id is increased. */
433259186a7SAndreas Färber     cpu->interrupt_request &= ~0x01;
434c01a71c1SChristian Borntraeger     tlb_flush(cpu, 1);
4359656f324Spbrook 
4369656f324Spbrook     return 0;
4379656f324Spbrook }
438e7f4eff7SJuan Quintela 
4396c3bff0eSPavel Dovgaluk static int cpu_common_pre_load(void *opaque)
4406c3bff0eSPavel Dovgaluk {
4416c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4426c3bff0eSPavel Dovgaluk 
443adee6424SPaolo Bonzini     cpu->exception_index = -1;
4446c3bff0eSPavel Dovgaluk 
4456c3bff0eSPavel Dovgaluk     return 0;
4466c3bff0eSPavel Dovgaluk }
4476c3bff0eSPavel Dovgaluk 
4486c3bff0eSPavel Dovgaluk static bool cpu_common_exception_index_needed(void *opaque)
4496c3bff0eSPavel Dovgaluk {
4506c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4516c3bff0eSPavel Dovgaluk 
452adee6424SPaolo Bonzini     return tcg_enabled() && cpu->exception_index != -1;
4536c3bff0eSPavel Dovgaluk }
4546c3bff0eSPavel Dovgaluk 
/* Optional migration subsection carrying exception_index; gated by
 * cpu_common_exception_index_needed() so streams to/from older QEMUs
 * stay compatible when no exception is pending. */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
4646c3bff0eSPavel Dovgaluk 
/* Migration description for target-independent CPUState fields (halted,
 * interrupt_request), with exception_index as an optional subsection. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};
4851a1562f5SAndreas Färber 
4869656f324Spbrook #endif
4879656f324Spbrook 
48838d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
489950f1472SGlauber Costa {
490bdc44640SAndreas Färber     CPUState *cpu;
491950f1472SGlauber Costa 
492bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
49355e5c285SAndreas Färber         if (cpu->cpu_index == index) {
494bdc44640SAndreas Färber             return cpu;
49555e5c285SAndreas Färber         }
496950f1472SGlauber Costa     }
497950f1472SGlauber Costa 
498bdc44640SAndreas Färber     return NULL;
499950f1472SGlauber Costa }
500950f1472SGlauber Costa 
50109daed84SEdgar E. Iglesias #if !defined(CONFIG_USER_ONLY)
50209daed84SEdgar E. Iglesias void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
50309daed84SEdgar E. Iglesias {
50409daed84SEdgar E. Iglesias     /* We only support one address space per cpu at the moment.  */
50509daed84SEdgar E. Iglesias     assert(cpu->as == as);
50609daed84SEdgar E. Iglesias 
50709daed84SEdgar E. Iglesias     if (cpu->tcg_as_listener) {
50809daed84SEdgar E. Iglesias         memory_listener_unregister(cpu->tcg_as_listener);
50909daed84SEdgar E. Iglesias     } else {
51009daed84SEdgar E. Iglesias         cpu->tcg_as_listener = g_new0(MemoryListener, 1);
51109daed84SEdgar E. Iglesias     }
51209daed84SEdgar E. Iglesias     cpu->tcg_as_listener->commit = tcg_commit;
51309daed84SEdgar E. Iglesias     memory_listener_register(cpu->tcg_as_listener, as);
51409daed84SEdgar E. Iglesias }
51509daed84SEdgar E. Iglesias #endif
51609daed84SEdgar E. Iglesias 
/* Attach @env's CPUState to the global CPU list, assign it the next free
 * cpu_index (by counting existing CPUs), initialize its breakpoint and
 * watchpoint queues, and register its migration state. */
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* NOTE(review): in the system-emulation build this count/insert runs
     * without the list lock — presumably CPUs are only created serially
     * at startup; confirm before creating CPUs from multiple threads. */
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    /* Register generic CPU state unless the device model supplies its own
     * vmsd for the whole device. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Legacy save/load path for targets still defining CPU_SAVE_VERSION;
     * mutually exclusive with the vmsd-based paths (asserted below). */
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
557fd6ce8f6Sbellard 
55894df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode emulation: @pc is already usable as a (virtual) address for
 * the TB cache, so invalidate the translation covering it directly. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
56394df27fdSPaul Brook #else
56400b941e5SAndreas Färber static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
5651e7855a5SMax Filippov {
566e8262a1bSMax Filippov     hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
567e8262a1bSMax Filippov     if (phys != -1) {
56809daed84SEdgar E. Iglesias         tb_invalidate_phys_addr(cpu->as,
56929d8ec7bSEdgar E. Iglesias                                 phys | (pc & ~TARGET_PAGE_MASK));
570e8262a1bSMax Filippov     }
5711e7855a5SMax Filippov }
572c27004ecSbellard #endif
573d720b93dSbellard 
574c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
57575a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
576c527ee8fSPaul Brook 
577c527ee8fSPaul Brook {
578c527ee8fSPaul Brook }
579c527ee8fSPaul Brook 
/* User-mode emulation: watchpoints are not supported. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}
5853ee887e8SPeter Maydell 
/* User-mode emulation: watchpoints are not supported; no-op. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}
5893ee887e8SPeter Maydell 
/* User-mode emulation: watchpoints are not supported. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
595c527ee8fSPaul Brook #else
5966658ffb8Spbrook /* Add a watchpoint.  */
59775a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
598a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
5996658ffb8Spbrook {
600c0ce998eSaliguori     CPUWatchpoint *wp;
6016658ffb8Spbrook 
60205068c0dSPeter Maydell     /* forbid ranges which are empty or run off the end of the address space */
60307e2863dSMax Filippov     if (len == 0 || (addr + len - 1) < addr) {
60475a34036SAndreas Färber         error_report("tried to set invalid watchpoint at %"
60575a34036SAndreas Färber                      VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
606b4051334Saliguori         return -EINVAL;
607b4051334Saliguori     }
6087267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
6096658ffb8Spbrook 
610a1d1bb31Saliguori     wp->vaddr = addr;
61105068c0dSPeter Maydell     wp->len = len;
612a1d1bb31Saliguori     wp->flags = flags;
613a1d1bb31Saliguori 
6142dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
615ff4700b0SAndreas Färber     if (flags & BP_GDB) {
616ff4700b0SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
617ff4700b0SAndreas Färber     } else {
618ff4700b0SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
619ff4700b0SAndreas Färber     }
620a1d1bb31Saliguori 
62131b030d4SAndreas Färber     tlb_flush_page(cpu, addr);
622a1d1bb31Saliguori 
623a1d1bb31Saliguori     if (watchpoint)
624a1d1bb31Saliguori         *watchpoint = wp;
625a1d1bb31Saliguori     return 0;
6266658ffb8Spbrook }
6276658ffb8Spbrook 
628a1d1bb31Saliguori /* Remove a specific watchpoint.  */
62975a34036SAndreas Färber int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
630a1d1bb31Saliguori                           int flags)
6316658ffb8Spbrook {
632a1d1bb31Saliguori     CPUWatchpoint *wp;
6336658ffb8Spbrook 
634ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
63505068c0dSPeter Maydell         if (addr == wp->vaddr && len == wp->len
6366e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
63775a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
6386658ffb8Spbrook             return 0;
6396658ffb8Spbrook         }
6406658ffb8Spbrook     }
641a1d1bb31Saliguori     return -ENOENT;
6426658ffb8Spbrook }
6436658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  The watchpoint must be
 * on this CPU's list; it is unlinked, its page's cached translation is
 * flushed, and the structure is freed.
 */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    /* Drop the cached translation so future accesses stop trapping.  */
    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
6537d03f82fSedgar_igl 
654a1d1bb31Saliguori /* Remove all matching watchpoints.  */
65575a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
656a1d1bb31Saliguori {
657c0ce998eSaliguori     CPUWatchpoint *wp, *next;
658a1d1bb31Saliguori 
659ff4700b0SAndreas Färber     QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
66075a34036SAndreas Färber         if (wp->flags & mask) {
66175a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
66275a34036SAndreas Färber         }
663a1d1bb31Saliguori     }
664c0ce998eSaliguori }
66505068c0dSPeter Maydell 
66605068c0dSPeter Maydell /* Return true if this watchpoint address matches the specified
66705068c0dSPeter Maydell  * access (ie the address range covered by the watchpoint overlaps
66805068c0dSPeter Maydell  * partially or completely with the address range covered by the
66905068c0dSPeter Maydell  * access).
67005068c0dSPeter Maydell  */
67105068c0dSPeter Maydell static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
67205068c0dSPeter Maydell                                                   vaddr addr,
67305068c0dSPeter Maydell                                                   vaddr len)
67405068c0dSPeter Maydell {
67505068c0dSPeter Maydell     /* We know the lengths are non-zero, but a little caution is
67605068c0dSPeter Maydell      * required to avoid errors in the case where the range ends
67705068c0dSPeter Maydell      * exactly at the top of the address space and so addr + len
67805068c0dSPeter Maydell      * wraps round to zero.
67905068c0dSPeter Maydell      */
68005068c0dSPeter Maydell     vaddr wpend = wp->vaddr + wp->len - 1;
68105068c0dSPeter Maydell     vaddr addrend = addr + len - 1;
68205068c0dSPeter Maydell 
68305068c0dSPeter Maydell     return !(addr > wpend || wp->vaddr > addrend);
68405068c0dSPeter Maydell }
68505068c0dSPeter Maydell 
686c527ee8fSPaul Brook #endif
687a1d1bb31Saliguori 
/* Add a breakpoint.  Always succeeds and returns 0; the new
 * breakpoint is optionally returned via *breakpoint.
 */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    /* Force retranslation of any code containing this pc so the
     * breakpoint takes effect.
     */
    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
7134c3a88a2Sbellard 
714a1d1bb31Saliguori /* Remove a specific breakpoint.  */
715b3310ab3SAndreas Färber int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
716a1d1bb31Saliguori {
717a1d1bb31Saliguori     CPUBreakpoint *bp;
718a1d1bb31Saliguori 
719f0c3c505SAndreas Färber     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
720a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
721b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
722a1d1bb31Saliguori             return 0;
7237d03f82fSedgar_igl         }
724a1d1bb31Saliguori     }
725a1d1bb31Saliguori     return -ENOENT;
7267d03f82fSedgar_igl }
7277d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.  Unlinks it from this
 * CPU's list, invalidates the translated code at its pc, and frees it.
 */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    /* Retranslate so the removed breakpoint no longer fires.  */
    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
737a1d1bb31Saliguori 
738a1d1bb31Saliguori /* Remove all matching breakpoints. */
739b3310ab3SAndreas Färber void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
740a1d1bb31Saliguori {
741c0ce998eSaliguori     CPUBreakpoint *bp, *next;
742a1d1bb31Saliguori 
743f0c3c505SAndreas Färber     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
744b3310ab3SAndreas Färber         if (bp->flags & mask) {
745b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
746b3310ab3SAndreas Färber         }
747c0ce998eSaliguori     }
7484c3a88a2Sbellard }
7494c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    /* No-op if the requested state is already in effect.  */
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            /* Under KVM, push the new debug state to the kernel.  */
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}
766c33a346eSbellard 
/* Report a fatal emulation error (printf-style message plus CPU state)
 * to stderr and, if enabled, the qemu log, then abort().  Never returns.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* The argument list is consumed twice (stderr and the log), so a
     * copy is needed before the first vfprintf.
     */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT action so the abort() below
         * actually terminates the process even if the guest installed
         * its own handler.
         */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
7987501267eSbellard 
7990124311eSbellard #if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
/* Map a ram_addr_t to the RAMBlock containing it, or abort if the
 * address does not fall inside any registered block.  The result is
 * cached in ram_list.mru_block to speed up repeated lookups.
 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* Fast path: check the most-recently-used block first.  */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
838041603feSPaolo Bonzini 
/* Reset CPU TLB dirty-tracking state for [start, start + length).
 * The whole range must lie within a single RAMBlock (asserted below).
 */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    /* Expand the range to whole target pages.  */
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    /* Convert the ram_addr_t to a host virtual address for the TLB.  */
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}
855d24981d3SJuan Quintela 
856d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
857a2f4d5beSJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
85852159192SJuan Quintela                                      unsigned client)
859d24981d3SJuan Quintela {
860d24981d3SJuan Quintela     if (length == 0)
861d24981d3SJuan Quintela         return;
862c8d6f66aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range_type(start, length, client);
863d24981d3SJuan Quintela 
864d24981d3SJuan Quintela     if (tcg_enabled()) {
865a2f4d5beSJuan Quintela         tlb_reset_dirty_range_all(start, length);
866d24981d3SJuan Quintela     }
8671ccde1cbSbellard }
8681ccde1cbSbellard 
/* Record whether dirty-page tracking is enabled by setting the
 * file-scope in_migration flag.
 */
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
87374576198Saliguori 
/* Called from RCU critical section */
/* Compute the iotlb value for a TLB entry mapping @vaddr to @paddr in
 * @section.  For RAM the value is the page-aligned ram address (ORed
 * with the NOTDIRTY or ROM special-section index); for MMIO it is the
 * section's index in the dispatch map plus the in-section offset.
 * Pages covered by a watchpoint are redirected to the WATCH section
 * and marked TLB_MMIO so accesses take the slow path.
 */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        /* MMIO: encode the section index within the dispatch map.  */
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
9149fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
91533417e70Sbellard 
916e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
9178da3ff18Spbrook 
/* Forward declarations for the sub-page (finer than TARGET_PAGE_SIZE)
 * MMIO dispatch helpers defined later in this file.
 */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

/* Allocator hook for guest RAM; defaults to anonymous allocation and
 * can be overridden via phys_mem_set_alloc() below.
 */
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;
92491138037SMarkus Armbruster 
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
93491138037SMarkus Armbruster 
/* Append @section to the map's section array (taking a reference on
 * its MemoryRegion) and return its index.
 */
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    /* Grow the array geometrically (doubling, minimum 16 slots).  */
    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
9535312bd8bSAvi Kivity 
/* Drop the reference a section holds on its MemoryRegion; subpage
 * regions are owned by this file and are freed here as well.
 */
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
964058bc4b5SPaolo Bonzini 
9656092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
9665312bd8bSAvi Kivity {
9679affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
9689affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
969058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
970058bc4b5SPaolo Bonzini     }
9719affd6fcSPaolo Bonzini     g_free(map->sections);
9729affd6fcSPaolo Bonzini     g_free(map->nodes);
9735312bd8bSAvi Kivity }
9745312bd8bSAvi Kivity 
/* Register a MemoryRegionSection smaller than a target page, creating
 * (or reusing) the subpage container for its page and recording the
 * section over the [start, end] byte range within that page.
 */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must either already be a subpage or be unassigned.  */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First subsection on this page: create the subpage container
         * and point the page at it.
         */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
10040f0cb164SAvi Kivity 
10050f0cb164SAvi Kivity 
/* Register a page-aligned, page-multiple section by mapping all of its
 * pages to a single section index in the dispatch tree.
 */
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
101733417e70Sbellard 
/* MemoryListener callback: add @section to the address space's
 * next dispatch map.  The section is split into an (optional) unaligned
 * head handled as a subpage, a page-aligned middle registered as a
 * multipage run, and an (optional) sub-page tail.
 */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned head: register the piece up to the next page
         * boundary as a subpage.
         */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Consume the remainder piece by piece; 'now' holds the chunk just
     * registered, 'remain' what is still pending.
     */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Page-aligned middle: round down to a whole-page size.  */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
10500f0cb164SAvi Kivity 
/* Flush KVM's coalesced-MMIO ring buffer; a no-op when KVM is not in
 * use.  Fix: braces added per QEMU coding style.
 */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
105662a2744cSSheng Yang 
/* Acquire the mutex protecting writers of ram_list.  */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
1061b2a8658eSUmesh Deshpande 
/* Release the ram_list mutex taken by qemu_mutex_lock_ramlist().  */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1066b2a8658eSUmesh Deshpande 
1067e1e84ba0SMarkus Armbruster #ifdef __linux__
1068c902760fSMarcelo Tosatti 
1069c902760fSMarcelo Tosatti #include <sys/vfs.h>
1070c902760fSMarcelo Tosatti 
1071c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
1072c902760fSMarcelo Tosatti 
1073fc7a5800SHu Tao static long gethugepagesize(const char *path, Error **errp)
1074c902760fSMarcelo Tosatti {
1075c902760fSMarcelo Tosatti     struct statfs fs;
1076c902760fSMarcelo Tosatti     int ret;
1077c902760fSMarcelo Tosatti 
1078c902760fSMarcelo Tosatti     do {
1079c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
1080c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
1081c902760fSMarcelo Tosatti 
1082c902760fSMarcelo Tosatti     if (ret != 0) {
1083fc7a5800SHu Tao         error_setg_errno(errp, errno, "failed to get page size of file %s",
1084fc7a5800SHu Tao                          path);
1085c902760fSMarcelo Tosatti         return 0;
1086c902760fSMarcelo Tosatti     }
1087c902760fSMarcelo Tosatti 
1088c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
1089c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1090c902760fSMarcelo Tosatti 
1091c902760fSMarcelo Tosatti     return fs.f_bsize;
1092c902760fSMarcelo Tosatti }
1093c902760fSMarcelo Tosatti 
/*
 * Back @block with a hugetlbfs-style file under @path and mmap it.
 * Returns the mapped area and stores the backing fd in block->fd on
 * success; returns NULL with @errp set on failure.  With -mem-prealloc
 * a failure is fatal (exit(1)).
 *
 * NOTE(review): the error path dereferences *errp; this assumes callers
 * always pass a non-NULL errp — verify at call sites.
 */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    /* Unlink immediately: the fd keeps the file alive, and the storage
     * is reclaimed automatically when the fd is closed.
     */
    unlink(filename);
    g_free(filename);

    /* Round the size up to a multiple of the huge page size.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        /* Touch every page now so faults don't happen at runtime.  */
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
1184c902760fSMarcelo Tosatti #endif
1185c902760fSMarcelo Tosatti 
11860dc3f44aSMike Day /* Called with the ramlist lock held.  */
/* Find a free ram_addr_t range of at least @size bytes between the
 * existing RAM blocks, preferring the smallest gap that fits (best
 * fit).  Aborts if no gap is large enough.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        /* Find the start of the closest block after this one; the gap
         * between them is a candidate.
         */
        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
122204b16653SAlex Williamson 
1223652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
122404b16653SAlex Williamson {
1225d17b5288SAlex Williamson     RAMBlock *block;
1226d17b5288SAlex Williamson     ram_addr_t last = 0;
1227d17b5288SAlex Williamson 
12280dc3f44aSMike Day     rcu_read_lock();
12290dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
123062be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
12310d53d9feSMike Day     }
12320dc3f44aSMike Day     rcu_read_unlock();
1233d17b5288SAlex Williamson     return last;
1234d17b5288SAlex Williamson }
1235d17b5288SAlex Williamson 
1236ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1237ddb97f1dSJason Baron {
1238ddb97f1dSJason Baron     int ret;
1239ddb97f1dSJason Baron 
1240ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
124147c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1242ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1243ddb97f1dSJason Baron         if (ret) {
1244ddb97f1dSJason Baron             perror("qemu_madvise");
1245ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1246ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1247ddb97f1dSJason Baron         }
1248ddb97f1dSJason Baron     }
1249ddb97f1dSJason Baron }
1250ddb97f1dSJason Baron 
12510dc3f44aSMike Day /* Called within an RCU critical section, or while the ramlist lock
12520dc3f44aSMike Day  * is held.
12530dc3f44aSMike Day  */
125420cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
125584b89d78SCam Macdonell {
125620cfe881SHu Tao     RAMBlock *block;
125784b89d78SCam Macdonell 
12580dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1259c5705a77SAvi Kivity         if (block->offset == addr) {
126020cfe881SHu Tao             return block;
1261c5705a77SAvi Kivity         }
1262c5705a77SAvi Kivity     }
126320cfe881SHu Tao 
126420cfe881SHu Tao     return NULL;
126520cfe881SHu Tao }
126620cfe881SHu Tao 
1267ae3a7047SMike Day /* Called with iothread lock held.  */
126820cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
126920cfe881SHu Tao {
1270ae3a7047SMike Day     RAMBlock *new_block, *block;
127120cfe881SHu Tao 
12720dc3f44aSMike Day     rcu_read_lock();
1273ae3a7047SMike Day     new_block = find_ram_block(addr);
1274c5705a77SAvi Kivity     assert(new_block);
1275c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
127684b89d78SCam Macdonell 
127709e5ab63SAnthony Liguori     if (dev) {
127809e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
127984b89d78SCam Macdonell         if (id) {
128084b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
12817267c094SAnthony Liguori             g_free(id);
128284b89d78SCam Macdonell         }
128384b89d78SCam Macdonell     }
128484b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
128584b89d78SCam Macdonell 
12860dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1287c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
128884b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
128984b89d78SCam Macdonell                     new_block->idstr);
129084b89d78SCam Macdonell             abort();
129184b89d78SCam Macdonell         }
129284b89d78SCam Macdonell     }
12930dc3f44aSMike Day     rcu_read_unlock();
1294c5705a77SAvi Kivity }
1295c5705a77SAvi Kivity 
1296ae3a7047SMike Day /* Called with iothread lock held.  */
129720cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
129820cfe881SHu Tao {
1299ae3a7047SMike Day     RAMBlock *block;
130020cfe881SHu Tao 
1301ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1302ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1303ae3a7047SMike Day      * does not work anyway.
1304ae3a7047SMike Day      */
1305ae3a7047SMike Day 
13060dc3f44aSMike Day     rcu_read_lock();
1307ae3a7047SMike Day     block = find_ram_block(addr);
130820cfe881SHu Tao     if (block) {
130920cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
131020cfe881SHu Tao     }
13110dc3f44aSMike Day     rcu_read_unlock();
131220cfe881SHu Tao }
131320cfe881SHu Tao 
13148490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
13158490fc78SLuiz Capitulino {
131675cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
13178490fc78SLuiz Capitulino         /* disabled by the user */
13188490fc78SLuiz Capitulino         return 0;
13198490fc78SLuiz Capitulino     }
13208490fc78SLuiz Capitulino 
13218490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
13228490fc78SLuiz Capitulino }
13238490fc78SLuiz Capitulino 
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 *
 * Returns 0 on success, -EINVAL (with @errp set) when the block is not
 * resizeable or @newsize exceeds the block's max_length.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    /* Sizes are always handled in target-page units. */
    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Clear dirty bits over the old extent first, then mark the whole
     * new extent dirty so it is fully migrated/flushed after the resize.
     */
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        /* Let the owner adapt device state to the new size. */
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
136862be4e3aSMichael S. Tsirkin 
/* Insert @new_block into the global RAM list, allocating its host
 * memory unless it was pre-set by the caller or is handled by Xen.
 * Returns the guest ram offset chosen for the block, or -1 with
 * @errp set when host memory allocation fails.
 */
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    /* Snapshot the current top of RAM (in pages) so we can grow the
     * dirty bitmaps if this block raises it.
     */
    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
       }
    }
    /* Newly added RAM starts out fully dirty (e.g. for migration). */
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
1448e9a1ab19Sbellard 
14490b183fc8SPaolo Bonzini #ifdef __linux__
1450e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1451dbcb8981SPaolo Bonzini                                     bool share, const char *mem_path,
14527f56e740SPaolo Bonzini                                     Error **errp)
1453e1c57ab8SPaolo Bonzini {
1454e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1455ef701d7bSHu Tao     ram_addr_t addr;
1456ef701d7bSHu Tao     Error *local_err = NULL;
1457e1c57ab8SPaolo Bonzini 
1458e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
14597f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
14607f56e740SPaolo Bonzini         return -1;
1461e1c57ab8SPaolo Bonzini     }
1462e1c57ab8SPaolo Bonzini 
1463e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1464e1c57ab8SPaolo Bonzini         /*
1465e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1466e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1467e1c57ab8SPaolo Bonzini          * a hook there.
1468e1c57ab8SPaolo Bonzini          */
14697f56e740SPaolo Bonzini         error_setg(errp,
14707f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
14717f56e740SPaolo Bonzini         return -1;
1472e1c57ab8SPaolo Bonzini     }
1473e1c57ab8SPaolo Bonzini 
1474e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1475e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1476e1c57ab8SPaolo Bonzini     new_block->mr = mr;
14779b8424d5SMichael S. Tsirkin     new_block->used_length = size;
14789b8424d5SMichael S. Tsirkin     new_block->max_length = size;
1479dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
14807f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
14817f56e740SPaolo Bonzini                                      mem_path, errp);
14827f56e740SPaolo Bonzini     if (!new_block->host) {
14837f56e740SPaolo Bonzini         g_free(new_block);
14847f56e740SPaolo Bonzini         return -1;
14857f56e740SPaolo Bonzini     }
14867f56e740SPaolo Bonzini 
1487ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1488ef701d7bSHu Tao     if (local_err) {
1489ef701d7bSHu Tao         g_free(new_block);
1490ef701d7bSHu Tao         error_propagate(errp, local_err);
1491ef701d7bSHu Tao         return -1;
1492ef701d7bSHu Tao     }
1493ef701d7bSHu Tao     return addr;
1494e1c57ab8SPaolo Bonzini }
14950b183fc8SPaolo Bonzini #endif
1496e1c57ab8SPaolo Bonzini 
149762be4e3aSMichael S. Tsirkin static
149862be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
149962be4e3aSMichael S. Tsirkin                                    void (*resized)(const char*,
150062be4e3aSMichael S. Tsirkin                                                    uint64_t length,
150162be4e3aSMichael S. Tsirkin                                                    void *host),
150262be4e3aSMichael S. Tsirkin                                    void *host, bool resizeable,
1503ef701d7bSHu Tao                                    MemoryRegion *mr, Error **errp)
1504e1c57ab8SPaolo Bonzini {
1505e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1506ef701d7bSHu Tao     ram_addr_t addr;
1507ef701d7bSHu Tao     Error *local_err = NULL;
1508e1c57ab8SPaolo Bonzini 
1509e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
151062be4e3aSMichael S. Tsirkin     max_size = TARGET_PAGE_ALIGN(max_size);
1511e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1512e1c57ab8SPaolo Bonzini     new_block->mr = mr;
151362be4e3aSMichael S. Tsirkin     new_block->resized = resized;
15149b8424d5SMichael S. Tsirkin     new_block->used_length = size;
15159b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
151662be4e3aSMichael S. Tsirkin     assert(max_size >= size);
1517e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1518e1c57ab8SPaolo Bonzini     new_block->host = host;
1519e1c57ab8SPaolo Bonzini     if (host) {
15207bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1521e1c57ab8SPaolo Bonzini     }
152262be4e3aSMichael S. Tsirkin     if (resizeable) {
152362be4e3aSMichael S. Tsirkin         new_block->flags |= RAM_RESIZEABLE;
152462be4e3aSMichael S. Tsirkin     }
1525ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1526ef701d7bSHu Tao     if (local_err) {
1527ef701d7bSHu Tao         g_free(new_block);
1528ef701d7bSHu Tao         error_propagate(errp, local_err);
1529ef701d7bSHu Tao         return -1;
1530ef701d7bSHu Tao     }
1531ef701d7bSHu Tao     return addr;
1532e1c57ab8SPaolo Bonzini }
1533e1c57ab8SPaolo Bonzini 
153462be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
153562be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
153662be4e3aSMichael S. Tsirkin {
153762be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
153862be4e3aSMichael S. Tsirkin }
153962be4e3aSMichael S. Tsirkin 
1540ef701d7bSHu Tao ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
15416977dfe6SYoshiaki Tamura {
154262be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
154362be4e3aSMichael S. Tsirkin }
154462be4e3aSMichael S. Tsirkin 
154562be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
154662be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
154762be4e3aSMichael S. Tsirkin                                                      uint64_t length,
154862be4e3aSMichael S. Tsirkin                                                      void *host),
154962be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
155062be4e3aSMichael S. Tsirkin {
155162be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
15526977dfe6SYoshiaki Tamura }
15536977dfe6SYoshiaki Tamura 
/* Unlink the block starting at @addr from the RAM list without freeing
 * its backing host memory (the caller owns that storage).
 */
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            /* RCU readers may still be walking the list; free the
             * RAMBlock struct only after a grace period.
             */
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
15721f2e98b6SAlex Williamson 
/* Release the host memory backing @block, then the block itself.
 * Runs as an RCU callback after the block has left the list.
 */
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ; /* caller-provided memory: nothing to free here */
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        /* file-backed (e.g. -mem-path): unmap and close the fd */
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
158943771539SPaolo Bonzini 
/* Free the block starting at @addr: unlink it from the RAM list and
 * reclaim its host memory after an RCU grace period.
 */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            /* Readers may still hold pointers into the block; defer the
             * actual free (reclaim_ramblock) until they are done.
             */
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
1608e9a1ab19Sbellard 
1609cd19cfa2SHuang Ying #ifndef _WIN32
/* Re-establish the mapping for [addr, addr + length) after the old pages
 * were discarded (e.g. following a hardware memory error).  The mapping
 * is recreated at the same virtual address with MAP_FIXED so existing
 * host pointers into the block remain valid.
 */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ; /* caller-provided memory: nothing to remap */
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    /* file-backed: remap the same file region in place,
                     * preserving the block's shared/private mode
                     */
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* madvise settings were lost with the old mapping */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
1656cd19cfa2SHuang Ying #endif /* !_WIN32 */
1657cd19cfa2SHuang Ying 
1658a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1659a35ba7beSPaolo Bonzini {
1660ae3a7047SMike Day     RAMBlock *block;
1661ae3a7047SMike Day     int fd;
1662a35ba7beSPaolo Bonzini 
16630dc3f44aSMike Day     rcu_read_lock();
1664ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1665ae3a7047SMike Day     fd = block->fd;
16660dc3f44aSMike Day     rcu_read_unlock();
1667ae3a7047SMike Day     return fd;
1668a35ba7beSPaolo Bonzini }
1669a35ba7beSPaolo Bonzini 
16703fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
16713fd74b84SDamjan Marion {
1672ae3a7047SMike Day     RAMBlock *block;
1673ae3a7047SMike Day     void *ptr;
16743fd74b84SDamjan Marion 
16750dc3f44aSMike Day     rcu_read_lock();
1676ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1677ae3a7047SMike Day     ptr = ramblock_ptr(block, 0);
16780dc3f44aSMike Day     rcu_read_unlock();
1679ae3a7047SMike Day     return ptr;
16803fd74b84SDamjan Marion }
16813fd74b84SDamjan Marion 
16821b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
1683ae3a7047SMike Day  * This should not be used for general purpose DMA.  Use address_space_map
1684ae3a7047SMike Day  * or address_space_rw instead. For local memory (e.g. video ram) that the
1685ae3a7047SMike Day  * device owns, use memory_region_get_ram_ptr.
16860dc3f44aSMike Day  *
16870dc3f44aSMike Day  * By the time this function returns, the returned pointer is not protected
16880dc3f44aSMike Day  * by RCU anymore.  If the caller is not within an RCU critical section and
16890dc3f44aSMike Day  * does not hold the iothread lock, it must have other means of protecting the
16900dc3f44aSMike Day  * pointer, such as a reference to the region that includes the incoming
16910dc3f44aSMike Day  * ram_addr_t.
16921b5ec234SPaolo Bonzini  */
16931b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
16941b5ec234SPaolo Bonzini {
1695ae3a7047SMike Day     RAMBlock *block;
1696ae3a7047SMike Day     void *ptr;
16971b5ec234SPaolo Bonzini 
16980dc3f44aSMike Day     rcu_read_lock();
1699ae3a7047SMike Day     block = qemu_get_ram_block(addr);
1700ae3a7047SMike Day 
1701ae3a7047SMike Day     if (xen_enabled() && block->host == NULL) {
1702432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1703432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1704712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1705432d268cSJun Nakajima          */
1706432d268cSJun Nakajima         if (block->offset == 0) {
1707ae3a7047SMike Day             ptr = xen_map_cache(addr, 0, 0);
17080dc3f44aSMike Day             goto unlock;
1709432d268cSJun Nakajima         }
1710ae3a7047SMike Day 
1711ae3a7047SMike Day         block->host = xen_map_cache(block->offset, block->max_length, 1);
1712432d268cSJun Nakajima     }
1713ae3a7047SMike Day     ptr = ramblock_ptr(block, addr - block->offset);
1714ae3a7047SMike Day 
17150dc3f44aSMike Day unlock:
17160dc3f44aSMike Day     rcu_read_unlock();
1717ae3a7047SMike Day     return ptr;
171894a6b54fSpbrook }
1719f471a17eSAlex Williamson 
172038bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1721ae3a7047SMike Day  * but takes a size argument.
17220dc3f44aSMike Day  *
17230dc3f44aSMike Day  * By the time this function returns, the returned pointer is not protected
17240dc3f44aSMike Day  * by RCU anymore.  If the caller is not within an RCU critical section and
17250dc3f44aSMike Day  * does not hold the iothread lock, it must have other means of protecting the
17260dc3f44aSMike Day  * pointer, such as a reference to the region that includes the incoming
17270dc3f44aSMike Day  * ram_addr_t.
1728ae3a7047SMike Day  */
1729cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
173038bee5dcSStefano Stabellini {
1731ae3a7047SMike Day     void *ptr;
17328ab934f9SStefano Stabellini     if (*size == 0) {
17338ab934f9SStefano Stabellini         return NULL;
17348ab934f9SStefano Stabellini     }
1735868bb33fSJan Kiszka     if (xen_enabled()) {
1736e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1737868bb33fSJan Kiszka     } else {
173838bee5dcSStefano Stabellini         RAMBlock *block;
17390dc3f44aSMike Day         rcu_read_lock();
17400dc3f44aSMike Day         QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
17419b8424d5SMichael S. Tsirkin             if (addr - block->offset < block->max_length) {
17429b8424d5SMichael S. Tsirkin                 if (addr - block->offset + *size > block->max_length)
17439b8424d5SMichael S. Tsirkin                     *size = block->max_length - addr + block->offset;
1744ae3a7047SMike Day                 ptr = ramblock_ptr(block, addr - block->offset);
17450dc3f44aSMike Day                 rcu_read_unlock();
1746ae3a7047SMike Day                 return ptr;
174738bee5dcSStefano Stabellini             }
174838bee5dcSStefano Stabellini         }
174938bee5dcSStefano Stabellini 
175038bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
175138bee5dcSStefano Stabellini         abort();
175238bee5dcSStefano Stabellini     }
175338bee5dcSStefano Stabellini }
175438bee5dcSStefano Stabellini 
/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 *
 * Stores the ram offset in *ram_addr and returns the owning MemoryRegion,
 * or NULL when @ptr does not fall inside any mapped block.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        /* Under Xen, host pointers come from the map cache rather than
         * from RAMBlock host mappings.
         */
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    /* Fast path: check the most-recently-used block first. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case append when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}
1803f471a17eSAlex Williamson 
1804a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
18050e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
18061ccde1cbSbellard {
180752159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
18080e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
18093a7d929eSbellard     }
18100e0df1e2SAvi Kivity     switch (size) {
18110e0df1e2SAvi Kivity     case 1:
18125579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
18130e0df1e2SAvi Kivity         break;
18140e0df1e2SAvi Kivity     case 2:
18155579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
18160e0df1e2SAvi Kivity         break;
18170e0df1e2SAvi Kivity     case 4:
18185579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
18190e0df1e2SAvi Kivity         break;
18200e0df1e2SAvi Kivity     default:
18210e0df1e2SAvi Kivity         abort();
18220e0df1e2SAvi Kivity     }
18236886867eSPaolo Bonzini     cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1824f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1825f23db169Sbellard        flushed */
1826a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
18274917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
182893afeadeSAndreas Färber         tlb_set_dirty(env, current_cpu->mem_io_vaddr);
18294917cf44SAndreas Färber     }
18301ccde1cbSbellard }
18311ccde1cbSbellard 
1832b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1833b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1834b018ddf6SPaolo Bonzini {
1835b018ddf6SPaolo Bonzini     return is_write;
1836b018ddf6SPaolo Bonzini }
1837b018ddf6SPaolo Bonzini 
18380e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
18390e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1840b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
18410e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
18421ccde1cbSbellard };
18431ccde1cbSbellard 
18440f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
184566b9b43cSPeter Maydell static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
18460f459d16Spbrook {
184793afeadeSAndreas Färber     CPUState *cpu = current_cpu;
184893afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
184906d55cc1Saliguori     target_ulong pc, cs_base;
18500f459d16Spbrook     target_ulong vaddr;
1851a1d1bb31Saliguori     CPUWatchpoint *wp;
185206d55cc1Saliguori     int cpu_flags;
18530f459d16Spbrook 
1854ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
185506d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
185606d55cc1Saliguori          * the debug interrupt so that is will trigger after the
185706d55cc1Saliguori          * current instruction. */
185893afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
185906d55cc1Saliguori         return;
186006d55cc1Saliguori     }
186193afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1862ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
186305068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
186405068c0dSPeter Maydell             && (wp->flags & flags)) {
186508225676SPeter Maydell             if (flags == BP_MEM_READ) {
186608225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
186708225676SPeter Maydell             } else {
186808225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
186908225676SPeter Maydell             }
187008225676SPeter Maydell             wp->hitaddr = vaddr;
187166b9b43cSPeter Maydell             wp->hitattrs = attrs;
1872ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
1873ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
1874239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
187506d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
187627103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
18775638d180SAndreas Färber                     cpu_loop_exit(cpu);
187806d55cc1Saliguori                 } else {
187906d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1880648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
18810ea8cb88SAndreas Färber                     cpu_resume_from_signal(cpu, NULL);
18820f459d16Spbrook                 }
1883488d6577SMax Filippov             }
18846e140f28Saliguori         } else {
18856e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
18866e140f28Saliguori         }
18870f459d16Spbrook     }
18880f459d16Spbrook }
18890f459d16Spbrook 
18906658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
18916658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
18926658ffb8Spbrook    phys routines.  */
189366b9b43cSPeter Maydell static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
189466b9b43cSPeter Maydell                                   unsigned size, MemTxAttrs attrs)
18956658ffb8Spbrook {
189666b9b43cSPeter Maydell     MemTxResult res;
189766b9b43cSPeter Maydell     uint64_t data;
18986658ffb8Spbrook 
189966b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
19001ec9b909SAvi Kivity     switch (size) {
190167364150SMax Filippov     case 1:
190266b9b43cSPeter Maydell         data = address_space_ldub(&address_space_memory, addr, attrs, &res);
190367364150SMax Filippov         break;
190467364150SMax Filippov     case 2:
190566b9b43cSPeter Maydell         data = address_space_lduw(&address_space_memory, addr, attrs, &res);
190667364150SMax Filippov         break;
190767364150SMax Filippov     case 4:
190866b9b43cSPeter Maydell         data = address_space_ldl(&address_space_memory, addr, attrs, &res);
190967364150SMax Filippov         break;
19101ec9b909SAvi Kivity     default: abort();
19111ec9b909SAvi Kivity     }
191266b9b43cSPeter Maydell     *pdata = data;
191366b9b43cSPeter Maydell     return res;
191466b9b43cSPeter Maydell }
191566b9b43cSPeter Maydell 
191666b9b43cSPeter Maydell static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
191766b9b43cSPeter Maydell                                    uint64_t val, unsigned size,
191866b9b43cSPeter Maydell                                    MemTxAttrs attrs)
191966b9b43cSPeter Maydell {
192066b9b43cSPeter Maydell     MemTxResult res;
192166b9b43cSPeter Maydell 
192266b9b43cSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
192366b9b43cSPeter Maydell     switch (size) {
192466b9b43cSPeter Maydell     case 1:
192566b9b43cSPeter Maydell         address_space_stb(&address_space_memory, addr, val, attrs, &res);
192666b9b43cSPeter Maydell         break;
192766b9b43cSPeter Maydell     case 2:
192866b9b43cSPeter Maydell         address_space_stw(&address_space_memory, addr, val, attrs, &res);
192966b9b43cSPeter Maydell         break;
193066b9b43cSPeter Maydell     case 4:
193166b9b43cSPeter Maydell         address_space_stl(&address_space_memory, addr, val, attrs, &res);
193266b9b43cSPeter Maydell         break;
193366b9b43cSPeter Maydell     default: abort();
193466b9b43cSPeter Maydell     }
193566b9b43cSPeter Maydell     return res;
19366658ffb8Spbrook }
19376658ffb8Spbrook 
19381ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
193966b9b43cSPeter Maydell     .read_with_attrs = watch_mem_read,
194066b9b43cSPeter Maydell     .write_with_attrs = watch_mem_write,
19411ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
19426658ffb8Spbrook };
19436658ffb8Spbrook 
1944f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1945f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
1946db7b5426Sblueswir1 {
1947acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1948ff6cff75SPaolo Bonzini     uint8_t buf[8];
19495c9eb028SPeter Maydell     MemTxResult res;
1950791af8c8SPaolo Bonzini 
1951db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1952016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1953acc9d80bSJan Kiszka            subpage, len, addr);
1954db7b5426Sblueswir1 #endif
19555c9eb028SPeter Maydell     res = address_space_read(subpage->as, addr + subpage->base,
19565c9eb028SPeter Maydell                              attrs, buf, len);
19575c9eb028SPeter Maydell     if (res) {
19585c9eb028SPeter Maydell         return res;
1959f25a49e0SPeter Maydell     }
1960acc9d80bSJan Kiszka     switch (len) {
1961acc9d80bSJan Kiszka     case 1:
1962f25a49e0SPeter Maydell         *data = ldub_p(buf);
1963f25a49e0SPeter Maydell         return MEMTX_OK;
1964acc9d80bSJan Kiszka     case 2:
1965f25a49e0SPeter Maydell         *data = lduw_p(buf);
1966f25a49e0SPeter Maydell         return MEMTX_OK;
1967acc9d80bSJan Kiszka     case 4:
1968f25a49e0SPeter Maydell         *data = ldl_p(buf);
1969f25a49e0SPeter Maydell         return MEMTX_OK;
1970ff6cff75SPaolo Bonzini     case 8:
1971f25a49e0SPeter Maydell         *data = ldq_p(buf);
1972f25a49e0SPeter Maydell         return MEMTX_OK;
1973acc9d80bSJan Kiszka     default:
1974acc9d80bSJan Kiszka         abort();
1975acc9d80bSJan Kiszka     }
1976db7b5426Sblueswir1 }
1977db7b5426Sblueswir1 
1978f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
1979f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
1980db7b5426Sblueswir1 {
1981acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1982ff6cff75SPaolo Bonzini     uint8_t buf[8];
1983acc9d80bSJan Kiszka 
1984db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1985016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1986acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1987acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1988db7b5426Sblueswir1 #endif
1989acc9d80bSJan Kiszka     switch (len) {
1990acc9d80bSJan Kiszka     case 1:
1991acc9d80bSJan Kiszka         stb_p(buf, value);
1992acc9d80bSJan Kiszka         break;
1993acc9d80bSJan Kiszka     case 2:
1994acc9d80bSJan Kiszka         stw_p(buf, value);
1995acc9d80bSJan Kiszka         break;
1996acc9d80bSJan Kiszka     case 4:
1997acc9d80bSJan Kiszka         stl_p(buf, value);
1998acc9d80bSJan Kiszka         break;
1999ff6cff75SPaolo Bonzini     case 8:
2000ff6cff75SPaolo Bonzini         stq_p(buf, value);
2001ff6cff75SPaolo Bonzini         break;
2002acc9d80bSJan Kiszka     default:
2003acc9d80bSJan Kiszka         abort();
2004acc9d80bSJan Kiszka     }
20055c9eb028SPeter Maydell     return address_space_write(subpage->as, addr + subpage->base,
20065c9eb028SPeter Maydell                                attrs, buf, len);
2007db7b5426Sblueswir1 }
2008db7b5426Sblueswir1 
2009c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
2010016e9d62SAmos Kong                             unsigned len, bool is_write)
2011c353e4ccSPaolo Bonzini {
2012acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2013c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2014016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2015acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2016c353e4ccSPaolo Bonzini #endif
2017c353e4ccSPaolo Bonzini 
2018acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
2019016e9d62SAmos Kong                                       len, is_write);
2020c353e4ccSPaolo Bonzini }
2021c353e4ccSPaolo Bonzini 
202270c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2023f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2024f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2025ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2026ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2027ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2028ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2029c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
203070c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2031db7b5426Sblueswir1 };
2032db7b5426Sblueswir1 
2033c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
20345312bd8bSAvi Kivity                              uint16_t section)
2035db7b5426Sblueswir1 {
2036db7b5426Sblueswir1     int idx, eidx;
2037db7b5426Sblueswir1 
2038db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2039db7b5426Sblueswir1         return -1;
2040db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2041db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2042db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2043016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2044016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2045db7b5426Sblueswir1 #endif
2046db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
20475312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2048db7b5426Sblueswir1     }
2049db7b5426Sblueswir1 
2050db7b5426Sblueswir1     return 0;
2051db7b5426Sblueswir1 }
2052db7b5426Sblueswir1 
2053acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2054db7b5426Sblueswir1 {
2055c227f099SAnthony Liguori     subpage_t *mmio;
2056db7b5426Sblueswir1 
20577267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
20581eec614bSaliguori 
2059acc9d80bSJan Kiszka     mmio->as = as;
2060db7b5426Sblueswir1     mmio->base = base;
20612c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2062b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2063b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2064db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2065016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2066016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2067db7b5426Sblueswir1 #endif
2068b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2069db7b5426Sblueswir1 
2070db7b5426Sblueswir1     return mmio;
2071db7b5426Sblueswir1 }
2072db7b5426Sblueswir1 
2073a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2074a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
20755312bd8bSAvi Kivity {
2076a656e22fSPeter Crosthwaite     assert(as);
20775312bd8bSAvi Kivity     MemoryRegionSection section = {
2078a656e22fSPeter Crosthwaite         .address_space = as,
20795312bd8bSAvi Kivity         .mr = mr,
20805312bd8bSAvi Kivity         .offset_within_address_space = 0,
20815312bd8bSAvi Kivity         .offset_within_region = 0,
2082052e87b0SPaolo Bonzini         .size = int128_2_64(),
20835312bd8bSAvi Kivity     };
20845312bd8bSAvi Kivity 
208553cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
20865312bd8bSAvi Kivity }
20875312bd8bSAvi Kivity 
20889d82b5a7SPaolo Bonzini MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2089aa102231SAvi Kivity {
209079e2b9aeSPaolo Bonzini     AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
209179e2b9aeSPaolo Bonzini     MemoryRegionSection *sections = d->map.sections;
20929d82b5a7SPaolo Bonzini 
20939d82b5a7SPaolo Bonzini     return sections[index & ~TARGET_PAGE_MASK].mr;
2094aa102231SAvi Kivity }
2095aa102231SAvi Kivity 
2096e9179ce1SAvi Kivity static void io_mem_init(void)
2097e9179ce1SAvi Kivity {
20981f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
20992c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
21001f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
21012c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
21021f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
21032c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
21041f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2105e9179ce1SAvi Kivity }
2106e9179ce1SAvi Kivity 
2107ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
2108ac1970fbSAvi Kivity {
210989ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
211053cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
211153cb28cbSMarcel Apfelbaum     uint16_t n;
211253cb28cbSMarcel Apfelbaum 
2113a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
211453cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
2115a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
211653cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
2117a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
211853cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
2119a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
212053cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
212100752703SPaolo Bonzini 
21229736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
212300752703SPaolo Bonzini     d->as = as;
212400752703SPaolo Bonzini     as->next_dispatch = d;
212500752703SPaolo Bonzini }
212600752703SPaolo Bonzini 
212779e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d)
212879e2b9aeSPaolo Bonzini {
212979e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
213079e2b9aeSPaolo Bonzini     g_free(d);
213179e2b9aeSPaolo Bonzini }
213279e2b9aeSPaolo Bonzini 
213300752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
213400752703SPaolo Bonzini {
213500752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
21360475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
21370475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
2138ac1970fbSAvi Kivity 
213953cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
2140b35ba30fSMichael S. Tsirkin 
214179e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, next);
214253cb28cbSMarcel Apfelbaum     if (cur) {
214379e2b9aeSPaolo Bonzini         call_rcu(cur, address_space_dispatch_free, rcu);
2144ac1970fbSAvi Kivity     }
21459affd6fcSPaolo Bonzini }
21469affd6fcSPaolo Bonzini 
21471d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
214850c1e149SAvi Kivity {
2149182735efSAndreas Färber     CPUState *cpu;
2150117712c3SAvi Kivity 
2151117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
2152117712c3SAvi Kivity        reset the modified entries */
2153117712c3SAvi Kivity     /* XXX: slow ! */
2154bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
215533bde2e1SEdgar E. Iglesias         /* FIXME: Disentangle the cpu.h circular files deps so we can
215633bde2e1SEdgar E. Iglesias            directly get the right CPU from listener.  */
215733bde2e1SEdgar E. Iglesias         if (cpu->tcg_as_listener != listener) {
215833bde2e1SEdgar E. Iglesias             continue;
215933bde2e1SEdgar E. Iglesias         }
216076e5c76fSPaolo Bonzini         cpu_reload_memory_map(cpu);
2161117712c3SAvi Kivity     }
216250c1e149SAvi Kivity }
216350c1e149SAvi Kivity 
216493632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
216593632747SAvi Kivity {
2166981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(true);
216793632747SAvi Kivity }
216893632747SAvi Kivity 
216993632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
217093632747SAvi Kivity {
2171981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(false);
217293632747SAvi Kivity }
217393632747SAvi Kivity 
217493632747SAvi Kivity static MemoryListener core_memory_listener = {
217593632747SAvi Kivity     .log_global_start = core_log_global_start,
217693632747SAvi Kivity     .log_global_stop = core_log_global_stop,
2177ac1970fbSAvi Kivity     .priority = 1,
217893632747SAvi Kivity };
217993632747SAvi Kivity 
2180ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
2181ac1970fbSAvi Kivity {
218200752703SPaolo Bonzini     as->dispatch = NULL;
218389ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
2184ac1970fbSAvi Kivity         .begin = mem_begin,
218500752703SPaolo Bonzini         .commit = mem_commit,
2186ac1970fbSAvi Kivity         .region_add = mem_add,
2187ac1970fbSAvi Kivity         .region_nop = mem_add,
2188ac1970fbSAvi Kivity         .priority = 0,
2189ac1970fbSAvi Kivity     };
219089ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
2191ac1970fbSAvi Kivity }
2192ac1970fbSAvi Kivity 
21936e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
21946e48e8f9SPaolo Bonzini {
21956e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
21966e48e8f9SPaolo Bonzini }
21976e48e8f9SPaolo Bonzini 
219883f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
219983f3c251SAvi Kivity {
220083f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
220183f3c251SAvi Kivity 
220279e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
220379e2b9aeSPaolo Bonzini     if (d) {
220479e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
220579e2b9aeSPaolo Bonzini     }
220683f3c251SAvi Kivity }
220783f3c251SAvi Kivity 
220862152b8aSAvi Kivity static void memory_map_init(void)
220962152b8aSAvi Kivity {
22107267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
221103f49957SPaolo Bonzini 
221257271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
22137dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2214309cb471SAvi Kivity 
22157267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
22163bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
22173bb28b72SJan Kiszka                           65536);
22187dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
221993632747SAvi Kivity 
2220f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
22212641689aSliguang }
222262152b8aSAvi Kivity 
222362152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
222462152b8aSAvi Kivity {
222562152b8aSAvi Kivity     return system_memory;
222662152b8aSAvi Kivity }
222762152b8aSAvi Kivity 
2228309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2229309cb471SAvi Kivity {
2230309cb471SAvi Kivity     return system_io;
2231309cb471SAvi Kivity }
2232309cb471SAvi Kivity 
2233e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2234e2eef170Spbrook 
223513eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
223613eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2237f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2238a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
223913eb76e0Sbellard {
224013eb76e0Sbellard     int l, flags;
224113eb76e0Sbellard     target_ulong page;
224253a5960aSpbrook     void * p;
224313eb76e0Sbellard 
224413eb76e0Sbellard     while (len > 0) {
224513eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
224613eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
224713eb76e0Sbellard         if (l > len)
224813eb76e0Sbellard             l = len;
224913eb76e0Sbellard         flags = page_get_flags(page);
225013eb76e0Sbellard         if (!(flags & PAGE_VALID))
2251a68fe89cSPaul Brook             return -1;
225213eb76e0Sbellard         if (is_write) {
225313eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2254a68fe89cSPaul Brook                 return -1;
2255579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
225672fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2257a68fe89cSPaul Brook                 return -1;
225872fb7daaSaurel32             memcpy(p, buf, l);
225972fb7daaSaurel32             unlock_user(p, addr, l);
226013eb76e0Sbellard         } else {
226113eb76e0Sbellard             if (!(flags & PAGE_READ))
2262a68fe89cSPaul Brook                 return -1;
2263579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
226472fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2265a68fe89cSPaul Brook                 return -1;
226672fb7daaSaurel32             memcpy(buf, p, l);
22675b257578Saurel32             unlock_user(p, addr, 0);
226813eb76e0Sbellard         }
226913eb76e0Sbellard         len -= l;
227013eb76e0Sbellard         buf += l;
227113eb76e0Sbellard         addr += l;
227213eb76e0Sbellard     }
2273a68fe89cSPaul Brook     return 0;
227413eb76e0Sbellard }
22758df1cd07Sbellard 
227613eb76e0Sbellard #else
227751d7a9ebSAnthony PERARD 
2278a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
2279a8170e5eSAvi Kivity                                      hwaddr length)
228051d7a9ebSAnthony PERARD {
2281f874bf90SPeter Maydell     if (cpu_physical_memory_range_includes_clean(addr, length)) {
2282f874bf90SPeter Maydell         tb_invalidate_phys_range(addr, addr + length, 0);
22836886867eSPaolo Bonzini         cpu_physical_memory_set_dirty_range_nocode(addr, length);
228449dfcec4SPaolo Bonzini     } else {
2285e226939dSAnthony PERARD         xen_modified_memory(addr, length);
228651d7a9ebSAnthony PERARD     }
228749dfcec4SPaolo Bonzini }
228851d7a9ebSAnthony PERARD 
228923326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
229082f2563fSPaolo Bonzini {
2291e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
229223326164SRichard Henderson 
229323326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
229423326164SRichard Henderson        otherwise specified.  */
229523326164SRichard Henderson     if (access_size_max == 0) {
229623326164SRichard Henderson         access_size_max = 4;
229782f2563fSPaolo Bonzini     }
229823326164SRichard Henderson 
229923326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
230023326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
230123326164SRichard Henderson         unsigned align_size_max = addr & -addr;
230223326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
230323326164SRichard Henderson             access_size_max = align_size_max;
230423326164SRichard Henderson         }
230523326164SRichard Henderson     }
230623326164SRichard Henderson 
230723326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
230823326164SRichard Henderson     if (l > access_size_max) {
230923326164SRichard Henderson         l = access_size_max;
231023326164SRichard Henderson     }
2311098178f2SPaolo Bonzini     if (l & (l - 1)) {
2312098178f2SPaolo Bonzini         l = 1 << (qemu_fls(l) - 1);
2313098178f2SPaolo Bonzini     }
231423326164SRichard Henderson 
231523326164SRichard Henderson     return l;
231682f2563fSPaolo Bonzini }
231782f2563fSPaolo Bonzini 
23185c9eb028SPeter Maydell MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
23195c9eb028SPeter Maydell                              uint8_t *buf, int len, bool is_write)
232013eb76e0Sbellard {
2321149f54b5SPaolo Bonzini     hwaddr l;
232213eb76e0Sbellard     uint8_t *ptr;
2323791af8c8SPaolo Bonzini     uint64_t val;
2324149f54b5SPaolo Bonzini     hwaddr addr1;
23255c8a00ceSPaolo Bonzini     MemoryRegion *mr;
23263b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
232713eb76e0Sbellard 
232841063e1eSPaolo Bonzini     rcu_read_lock();
232913eb76e0Sbellard     while (len > 0) {
233013eb76e0Sbellard         l = len;
23315c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
233213eb76e0Sbellard 
233313eb76e0Sbellard         if (is_write) {
23345c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
23355c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
23364917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
23376a00d601Sbellard                    potential bugs */
233823326164SRichard Henderson                 switch (l) {
233923326164SRichard Henderson                 case 8:
234023326164SRichard Henderson                     /* 64 bit write access */
234123326164SRichard Henderson                     val = ldq_p(buf);
23423b643495SPeter Maydell                     result |= memory_region_dispatch_write(mr, addr1, val, 8,
23433b643495SPeter Maydell                                                            attrs);
234423326164SRichard Henderson                     break;
234523326164SRichard Henderson                 case 4:
23461c213d19Sbellard                     /* 32 bit write access */
2347c27004ecSbellard                     val = ldl_p(buf);
23483b643495SPeter Maydell                     result |= memory_region_dispatch_write(mr, addr1, val, 4,
23493b643495SPeter Maydell                                                            attrs);
235023326164SRichard Henderson                     break;
235123326164SRichard Henderson                 case 2:
23521c213d19Sbellard                     /* 16 bit write access */
2353c27004ecSbellard                     val = lduw_p(buf);
23543b643495SPeter Maydell                     result |= memory_region_dispatch_write(mr, addr1, val, 2,
23553b643495SPeter Maydell                                                            attrs);
235623326164SRichard Henderson                     break;
235723326164SRichard Henderson                 case 1:
23581c213d19Sbellard                     /* 8 bit write access */
2359c27004ecSbellard                     val = ldub_p(buf);
23603b643495SPeter Maydell                     result |= memory_region_dispatch_write(mr, addr1, val, 1,
23613b643495SPeter Maydell                                                            attrs);
236223326164SRichard Henderson                     break;
236323326164SRichard Henderson                 default:
236423326164SRichard Henderson                     abort();
236513eb76e0Sbellard                 }
23662bbfa05dSPaolo Bonzini             } else {
23675c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
236813eb76e0Sbellard                 /* RAM case */
23695579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
237013eb76e0Sbellard                 memcpy(ptr, buf, l);
237151d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
23723a7d929eSbellard             }
237313eb76e0Sbellard         } else {
23745c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
237513eb76e0Sbellard                 /* I/O case */
23765c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
237723326164SRichard Henderson                 switch (l) {
237823326164SRichard Henderson                 case 8:
237923326164SRichard Henderson                     /* 64 bit read access */
23803b643495SPeter Maydell                     result |= memory_region_dispatch_read(mr, addr1, &val, 8,
23813b643495SPeter Maydell                                                           attrs);
238223326164SRichard Henderson                     stq_p(buf, val);
238323326164SRichard Henderson                     break;
238423326164SRichard Henderson                 case 4:
238513eb76e0Sbellard                     /* 32 bit read access */
23863b643495SPeter Maydell                     result |= memory_region_dispatch_read(mr, addr1, &val, 4,
23873b643495SPeter Maydell                                                           attrs);
2388c27004ecSbellard                     stl_p(buf, val);
238923326164SRichard Henderson                     break;
239023326164SRichard Henderson                 case 2:
239113eb76e0Sbellard                     /* 16 bit read access */
23923b643495SPeter Maydell                     result |= memory_region_dispatch_read(mr, addr1, &val, 2,
23933b643495SPeter Maydell                                                           attrs);
2394c27004ecSbellard                     stw_p(buf, val);
239523326164SRichard Henderson                     break;
239623326164SRichard Henderson                 case 1:
23971c213d19Sbellard                     /* 8 bit read access */
23983b643495SPeter Maydell                     result |= memory_region_dispatch_read(mr, addr1, &val, 1,
23993b643495SPeter Maydell                                                           attrs);
2400c27004ecSbellard                     stb_p(buf, val);
240123326164SRichard Henderson                     break;
240223326164SRichard Henderson                 default:
240323326164SRichard Henderson                     abort();
240413eb76e0Sbellard                 }
240513eb76e0Sbellard             } else {
240613eb76e0Sbellard                 /* RAM case */
24075c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2408f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
240913eb76e0Sbellard             }
241013eb76e0Sbellard         }
241113eb76e0Sbellard         len -= l;
241213eb76e0Sbellard         buf += l;
241313eb76e0Sbellard         addr += l;
241413eb76e0Sbellard     }
241541063e1eSPaolo Bonzini     rcu_read_unlock();
2416fd8aaa76SPaolo Bonzini 
24173b643495SPeter Maydell     return result;
241813eb76e0Sbellard }
24198df1cd07Sbellard 
24205c9eb028SPeter Maydell MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2421ac1970fbSAvi Kivity                                 const uint8_t *buf, int len)
2422ac1970fbSAvi Kivity {
24235c9eb028SPeter Maydell     return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
2424ac1970fbSAvi Kivity }
2425ac1970fbSAvi Kivity 
24265c9eb028SPeter Maydell MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
24275c9eb028SPeter Maydell                                uint8_t *buf, int len)
2428ac1970fbSAvi Kivity {
24295c9eb028SPeter Maydell     return address_space_rw(as, addr, attrs, buf, len, false);
2430ac1970fbSAvi Kivity }
2431ac1970fbSAvi Kivity 
2432ac1970fbSAvi Kivity 
/* Legacy helper: read/write @len bytes of guest-physical memory via the
 * global system memory address space, with unspecified transaction
 * attributes.  The MemTxResult is deliberately discarded.
 */
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
2439ac1970fbSAvi Kivity 
/* Mode selector for cpu_physical_memory_write_rom_internal(). */
enum write_rom_type {
    WRITE_DATA,    /* copy the buffer into RAM/ROM and dirty the range */
    FLUSH_CACHE,   /* only flush the host icache over the range */
};
2444582b55a9SAlexander Graf 
24452a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2446582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2447d0ecd2aaSbellard {
2448149f54b5SPaolo Bonzini     hwaddr l;
2449d0ecd2aaSbellard     uint8_t *ptr;
2450149f54b5SPaolo Bonzini     hwaddr addr1;
24515c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2452d0ecd2aaSbellard 
245341063e1eSPaolo Bonzini     rcu_read_lock();
2454d0ecd2aaSbellard     while (len > 0) {
2455d0ecd2aaSbellard         l = len;
24562a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2457d0ecd2aaSbellard 
24585c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
24595c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2460d0ecd2aaSbellard             /* do nothing */
2461d0ecd2aaSbellard         } else {
24625c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2463d0ecd2aaSbellard             /* ROM/RAM case */
24645579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2465582b55a9SAlexander Graf             switch (type) {
2466582b55a9SAlexander Graf             case WRITE_DATA:
2467d0ecd2aaSbellard                 memcpy(ptr, buf, l);
246851d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
2469582b55a9SAlexander Graf                 break;
2470582b55a9SAlexander Graf             case FLUSH_CACHE:
2471582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2472582b55a9SAlexander Graf                 break;
2473582b55a9SAlexander Graf             }
2474d0ecd2aaSbellard         }
2475d0ecd2aaSbellard         len -= l;
2476d0ecd2aaSbellard         buf += l;
2477d0ecd2aaSbellard         addr += l;
2478d0ecd2aaSbellard     }
247941063e1eSPaolo Bonzini     rcu_read_unlock();
2480d0ecd2aaSbellard }
2481d0ecd2aaSbellard 
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    /* WRITE_DATA copies the buffer and invalidates/dirties the range */
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
2488582b55a9SAlexander Graf 
2489582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2490582b55a9SAlexander Graf {
2491582b55a9SAlexander Graf     /*
2492582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2493582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2494582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2495582b55a9SAlexander Graf      * the host's instruction cache at least.
2496582b55a9SAlexander Graf      */
2497582b55a9SAlexander Graf     if (tcg_enabled()) {
2498582b55a9SAlexander Graf         return;
2499582b55a9SAlexander Graf     }
2500582b55a9SAlexander Graf 
25012a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
25022a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2503582b55a9SAlexander Graf }
2504582b55a9SAlexander Graf 
/* State for the single, global bounce buffer used by address_space_map()
 * when the target is not directly accessible RAM.  Released again in
 * address_space_unmap().
 */
typedef struct {
    MemoryRegion *mr;   /* region referenced while the mapping is live */
    void *buffer;       /* host allocation holding the staged data */
    hwaddr addr;        /* guest address the buffer shadows */
    hwaddr len;         /* length of the staged range */
    bool in_use;        /* claimed via atomic_xchg() in address_space_map() */
} BounceBuffer;

/* The one bounce buffer: only a single indirect mapping can exist at a time */
static BounceBuffer bounce;
25146d16c2f8Saliguori 
/* A waiter registered via cpu_register_map_client(): its bottom half is
 * scheduled when the bounce buffer becomes free again.
 */
typedef struct MapClient {
    QEMUBH *bh;                     /* scheduled on notification */
    QLIST_ENTRY(MapClient) link;    /* membership in map_client_list */
} MapClient;

/* Protects map_client_list; initialised in cpu_exec_init_all() */
QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
2523ba223c29Saliguori 
/* Unlink @client from map_client_list and free it.
 * Caller must hold map_client_list_lock.
 */
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
2529ba223c29Saliguori 
253033b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2531ba223c29Saliguori {
2532ba223c29Saliguori     MapClient *client;
2533ba223c29Saliguori 
253472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
253572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2536e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2537e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2538ba223c29Saliguori     }
2539ba223c29Saliguori }
2540ba223c29Saliguori 
2541e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2542d0ecd2aaSbellard {
2543d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2544d0ecd2aaSbellard 
254538e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2546e95205e1SFam Zheng     client->bh = bh;
2547d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
254833b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
254933b6c2edSFam Zheng         cpu_notify_map_clients_locked();
255033b6c2edSFam Zheng     }
255138e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2552d0ecd2aaSbellard }
2553d0ecd2aaSbellard 
/* One-time global initialisation of the memory subsystem: the RAM list
 * mutex, the memory map, the IO dispatch tables, and the map-client lock.
 */
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}
256138e047b5SFam Zheng 
2562e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2563d0ecd2aaSbellard {
2564e95205e1SFam Zheng     MapClient *client;
2565d0ecd2aaSbellard 
2566e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2567e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2568e95205e1SFam Zheng         if (client->bh == bh) {
2569e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2570e95205e1SFam Zheng             break;
2571e95205e1SFam Zheng         }
2572e95205e1SFam Zheng     }
2573e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2574d0ecd2aaSbellard }
2575d0ecd2aaSbellard 
/* Locked wrapper around cpu_notify_map_clients_locked(); called after
 * the bounce buffer has been released in address_space_unmap().
 */
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
25826d16c2f8Saliguori 
258351644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
258451644ab7SPaolo Bonzini {
25855c8a00ceSPaolo Bonzini     MemoryRegion *mr;
258651644ab7SPaolo Bonzini     hwaddr l, xlat;
258751644ab7SPaolo Bonzini 
258841063e1eSPaolo Bonzini     rcu_read_lock();
258951644ab7SPaolo Bonzini     while (len > 0) {
259051644ab7SPaolo Bonzini         l = len;
25915c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
25925c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
25935c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
25945c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
259551644ab7SPaolo Bonzini                 return false;
259651644ab7SPaolo Bonzini             }
259751644ab7SPaolo Bonzini         }
259851644ab7SPaolo Bonzini 
259951644ab7SPaolo Bonzini         len -= l;
260051644ab7SPaolo Bonzini         addr += l;
260151644ab7SPaolo Bonzini     }
260241063e1eSPaolo Bonzini     rcu_read_unlock();
260351644ab7SPaolo Bonzini     return true;
260451644ab7SPaolo Bonzini }
260551644ab7SPaolo Bonzini 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        /* Not direct RAM: fall back to the single global bounce buffer.
         * atomic_xchg() both tests and claims it; if it was already in
         * use, fail (the caller can retry via cpu_register_map_client). */
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        /* Keep the region alive until address_space_unmap() */
        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Read mapping: pre-fill the bounce buffer from guest memory */
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    /* Direct case: extend the mapping over consecutive translations as
     * long as they stay contiguous within the same MemoryRegion. */
    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            /* discontinuity: stop and return the shorter mapping */
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
26786d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct mapping: translate the host pointer back to its RAM
         * address, dirty the written range, and drop the region ref
         * taken in address_space_map(). */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    /* Bounce-buffer mapping: write back the staged data if needed,
     * release the buffer, and wake any waiting map clients. */
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
2711d0ecd2aaSbellard 
2712a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2713a8170e5eSAvi Kivity                               hwaddr *plen,
2714ac1970fbSAvi Kivity                               int is_write)
2715ac1970fbSAvi Kivity {
2716ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2717ac1970fbSAvi Kivity }
2718ac1970fbSAvi Kivity 
2719a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2720a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2721ac1970fbSAvi Kivity {
2722ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2723ac1970fbSAvi Kivity }
2724ac1970fbSAvi Kivity 
/* warning: addr must be aligned */
/* Load a 32-bit value from @as at @addr with the given @endian, storing
 * the transaction result in *@result (if non-NULL).  RAM is read through
 * the host pointer; everything else goes through MMIO dispatch.
 */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
        /* dispatch returns target-endian data; swap if the device
         * endianness differs from the target's */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN */
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}
27768df1cd07Sbellard 
/* 32-bit load, target-native endianness */
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

/* 32-bit load, little-endian */
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

/* 32-bit load, big-endian */
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
279750013115SPeter Maydell 
/* Legacy 32-bit loads: unspecified attributes, transaction result discarded */
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
28121e78bcc1SAlexander Graf 
/* warning: addr must be aligned */
/* Load a 64-bit value from @as at @addr with the given @endian, storing
 * the transaction result in *@result (if non-NULL).  Same structure as
 * address_space_ldl_internal() but for 8-byte accesses.
 */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
        /* swap if device endianness differs from the target's */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN */
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}
286584b7b8e7Sbellard 
286650013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
286750013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
286850013115SPeter Maydell {
286950013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
287050013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
287150013115SPeter Maydell }
287250013115SPeter Maydell 
287350013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
287450013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
287550013115SPeter Maydell {
287650013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
287750013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
287850013115SPeter Maydell }
287950013115SPeter Maydell 
288050013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
288150013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
288250013115SPeter Maydell {
288350013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
288450013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
288550013115SPeter Maydell }
288650013115SPeter Maydell 
/* Legacy 64-bit loads: unspecified attributes, transaction result discarded */
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
29011e78bcc1SAlexander Graf 
2902aab33094Sbellard /* XXX: optimize */
290350013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
290450013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
2905aab33094Sbellard {
2906aab33094Sbellard     uint8_t val;
290750013115SPeter Maydell     MemTxResult r;
290850013115SPeter Maydell 
290950013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
291050013115SPeter Maydell     if (result) {
291150013115SPeter Maydell         *result = r;
291250013115SPeter Maydell     }
2913aab33094Sbellard     return val;
2914aab33094Sbellard }
2915aab33094Sbellard 
/* Legacy byte load: unspecified attributes, transaction result discarded */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
292050013115SPeter Maydell 
/* warning: addr must be aligned */
/*
 * Common 16-bit load helper for the native/LE/BE public wrappers.
 * Translates @addr in @as, then either dispatches an MMIO read or reads
 * directly from guest RAM, byte-swapping as needed so the value is
 * returned in the byte order requested by @endian.
 * The transaction result is stored through @result when non-NULL.
 */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;            /* in: bytes wanted; out: bytes available */
    hwaddr addr1;
    MemTxResult r;

    /* The translated MemoryRegion is only valid while we hold the RCU
     * read lock, so the whole access happens inside the critical section.
     */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
        /* dispatch_read returns the value in target-native order; swap
         * only when the caller asked for the opposite endianness.
         */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN */
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;        /* direct RAM access cannot fail */
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}
2974aab33094Sbellard 
297550013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
297650013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
297750013115SPeter Maydell {
297850013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
297950013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
298050013115SPeter Maydell }
298150013115SPeter Maydell 
298250013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
298350013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
298450013115SPeter Maydell {
298550013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
298650013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
298750013115SPeter Maydell }
298850013115SPeter Maydell 
298950013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
299050013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
299150013115SPeter Maydell {
299250013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
299350013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
299450013115SPeter Maydell }
299550013115SPeter Maydell 
299641701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
29971e78bcc1SAlexander Graf {
299850013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
29991e78bcc1SAlexander Graf }
30001e78bcc1SAlexander Graf 
300141701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
30021e78bcc1SAlexander Graf {
300350013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30041e78bcc1SAlexander Graf }
30051e78bcc1SAlexander Graf 
300641701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
30071e78bcc1SAlexander Graf {
300850013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
30091e78bcc1SAlexander Graf }
30101e78bcc1SAlexander Graf 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
/*
 * 32-bit store that deliberately does NOT mark the RAM page dirty in the
 * normal way: callers (e.g. TLB/PTE updaters) use the dirty bitmap to
 * track modified page-table entries themselves.
 */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;            /* in: bytes wanted; out: bytes available */
    hwaddr addr1;
    MemTxResult r;

    /* Translation result is only stable under the RCU read lock. */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* MMIO: no dirty tracking applies; always native byte order. */
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration a clean page must still be flagged so the
         * store is not lost; any translated code covering it is
         * invalidated, but the "code" dirty bit is left untouched.
         */
        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
        r = MEMTX_OK;        /* direct RAM write cannot fail */
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}
30488df1cd07Sbellard 
304950013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
305050013115SPeter Maydell {
305150013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
305250013115SPeter Maydell }
305350013115SPeter Maydell 
/* warning: addr must be aligned */
/*
 * Common 32-bit store helper for the native/LE/BE public wrappers.
 * For MMIO the value is byte-swapped *before* dispatch (which expects
 * target-native order); for RAM the endian-specific store primitive is
 * used directly.  The transaction result goes through @result if non-NULL.
 */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;            /* in: bytes wanted; out: bytes available */
    hwaddr addr1;
    MemTxResult r;

    /* Translation result is only stable under the RCU read lock. */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* Swap into target-native order before handing to the device. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN */
            stl_p(ptr, val);
            break;
        }
        /* Unlike the _notdirty variant, mark the range dirty and drop
         * any translated code that covered it.
         */
        invalidate_and_set_dirty(addr1, 4);
        r = MEMTX_OK;        /* direct RAM write cannot fail */
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}
310450013115SPeter Maydell 
310550013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
310650013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
310750013115SPeter Maydell {
310850013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
310950013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
311050013115SPeter Maydell }
311150013115SPeter Maydell 
311250013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
311350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
311450013115SPeter Maydell {
311550013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
311650013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
311750013115SPeter Maydell }
311850013115SPeter Maydell 
311950013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
312050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
312150013115SPeter Maydell {
312250013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
312350013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
31243a7d929eSbellard }
31258df1cd07Sbellard 
3126ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
31271e78bcc1SAlexander Graf {
312850013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
31291e78bcc1SAlexander Graf }
31301e78bcc1SAlexander Graf 
3131ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
31321e78bcc1SAlexander Graf {
313350013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
31341e78bcc1SAlexander Graf }
31351e78bcc1SAlexander Graf 
3136ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
31371e78bcc1SAlexander Graf {
313850013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
31391e78bcc1SAlexander Graf }
31401e78bcc1SAlexander Graf 
3141aab33094Sbellard /* XXX: optimize */
314250013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
314350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3144aab33094Sbellard {
3145aab33094Sbellard     uint8_t v = val;
314650013115SPeter Maydell     MemTxResult r;
314750013115SPeter Maydell 
314850013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
314950013115SPeter Maydell     if (result) {
315050013115SPeter Maydell         *result = r;
315150013115SPeter Maydell     }
315250013115SPeter Maydell }
315350013115SPeter Maydell 
315450013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
315550013115SPeter Maydell {
315650013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3157aab33094Sbellard }
3158aab33094Sbellard 
/* warning: addr must be aligned */
/*
 * Common 16-bit store helper for the native/LE/BE public wrappers.
 * For MMIO the value is byte-swapped *before* dispatch (which expects
 * target-native order); for RAM the endian-specific store primitive is
 * used directly.  The transaction result goes through @result if non-NULL.
 */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;            /* in: bytes wanted; out: bytes available */
    hwaddr addr1;
    MemTxResult r;

    /* Translation result is only stable under the RCU read lock. */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* Swap into target-native order before handing to the device. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN */
            stw_p(ptr, val);
            break;
        }
        /* Mark the range dirty and drop any translated code covering it. */
        invalidate_and_set_dirty(addr1, 2);
        r = MEMTX_OK;        /* direct RAM write cannot fail */
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}
320850013115SPeter Maydell 
320950013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
321050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
321150013115SPeter Maydell {
321250013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
321350013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
321450013115SPeter Maydell }
321550013115SPeter Maydell 
321650013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
321750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
321850013115SPeter Maydell {
321950013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
322050013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
322150013115SPeter Maydell }
322250013115SPeter Maydell 
322350013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
322450013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
322550013115SPeter Maydell {
322650013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
322750013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3228aab33094Sbellard }
3229aab33094Sbellard 
32305ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
32311e78bcc1SAlexander Graf {
323250013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
32331e78bcc1SAlexander Graf }
32341e78bcc1SAlexander Graf 
32355ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
32361e78bcc1SAlexander Graf {
323750013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
32381e78bcc1SAlexander Graf }
32391e78bcc1SAlexander Graf 
32405ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
32411e78bcc1SAlexander Graf {
324250013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
32431e78bcc1SAlexander Graf }
32441e78bcc1SAlexander Graf 
3245aab33094Sbellard /* XXX: optimize */
324650013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
324750013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
324850013115SPeter Maydell {
324950013115SPeter Maydell     MemTxResult r;
325050013115SPeter Maydell     val = tswap64(val);
325150013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
325250013115SPeter Maydell     if (result) {
325350013115SPeter Maydell         *result = r;
325450013115SPeter Maydell     }
325550013115SPeter Maydell }
325650013115SPeter Maydell 
325750013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
325850013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
325950013115SPeter Maydell {
326050013115SPeter Maydell     MemTxResult r;
326150013115SPeter Maydell     val = cpu_to_le64(val);
326250013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
326350013115SPeter Maydell     if (result) {
326450013115SPeter Maydell         *result = r;
326550013115SPeter Maydell     }
326650013115SPeter Maydell }
326750013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
326850013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
326950013115SPeter Maydell {
327050013115SPeter Maydell     MemTxResult r;
327150013115SPeter Maydell     val = cpu_to_be64(val);
327250013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
327350013115SPeter Maydell     if (result) {
327450013115SPeter Maydell         *result = r;
327550013115SPeter Maydell     }
327650013115SPeter Maydell }
327750013115SPeter Maydell 
3278f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3279aab33094Sbellard {
328050013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3281aab33094Sbellard }
3282aab33094Sbellard 
3283f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
32841e78bcc1SAlexander Graf {
328550013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
32861e78bcc1SAlexander Graf }
32871e78bcc1SAlexander Graf 
3288f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
32891e78bcc1SAlexander Graf {
329050013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
32911e78bcc1SAlexander Graf }
32921e78bcc1SAlexander Graf 
32935e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3294f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3295b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
329613eb76e0Sbellard {
329713eb76e0Sbellard     int l;
3298a8170e5eSAvi Kivity     hwaddr phys_addr;
32999b3c35e0Sj_mayer     target_ulong page;
330013eb76e0Sbellard 
330113eb76e0Sbellard     while (len > 0) {
330213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
3303f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
330413eb76e0Sbellard         /* if no physical page mapped, return an error */
330513eb76e0Sbellard         if (phys_addr == -1)
330613eb76e0Sbellard             return -1;
330713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
330813eb76e0Sbellard         if (l > len)
330913eb76e0Sbellard             l = len;
33105e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
33112e38847bSEdgar E. Iglesias         if (is_write) {
33122e38847bSEdgar E. Iglesias             cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
33132e38847bSEdgar E. Iglesias         } else {
33145c9eb028SPeter Maydell             address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
33155c9eb028SPeter Maydell                              buf, l, 0);
33162e38847bSEdgar E. Iglesias         }
331713eb76e0Sbellard         len -= l;
331813eb76e0Sbellard         buf += l;
331913eb76e0Sbellard         addr += l;
332013eb76e0Sbellard     }
332113eb76e0Sbellard     return 0;
332213eb76e0Sbellard }
3323a68fe89cSPaul Brook #endif
332413eb76e0Sbellard 
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
33388e4a424bSBlue Swirl 
333976f35538SWen Congyang #ifndef CONFIG_USER_ONLY
3340a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
334176f35538SWen Congyang {
33425c8a00ceSPaolo Bonzini     MemoryRegion*mr;
3343149f54b5SPaolo Bonzini     hwaddr l = 1;
334441063e1eSPaolo Bonzini     bool res;
334576f35538SWen Congyang 
334641063e1eSPaolo Bonzini     rcu_read_lock();
33475c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3348149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
334976f35538SWen Congyang 
335041063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
335141063e1eSPaolo Bonzini     rcu_read_unlock();
335241063e1eSPaolo Bonzini     return res;
335376f35538SWen Congyang }
3354bd2fa51fSMichael R. Hines 
/*
 * Invoke @func once for every RAM block, passing its host pointer,
 * guest offset, currently-used length and the caller's @opaque.
 * The list is walked under the RCU read lock, so @func must not block
 * and must not assume blocks survive after the iteration.
 */
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);
    }
    rcu_read_unlock();
}
3365ec3f8c99SPeter Maydell #endif
3366