xref: /qemu/system/physmem.c (revision 6886b98036a8f8f5bce8b10756ce080084cef11b)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
197b31bbc2SPeter Maydell #include "qemu/osdep.h"
20da34e65cSMarkus Armbruster #include "qapi/error.h"
21777872e5SStefan Weil #ifndef _WIN32
22d5a8f07cSbellard #include <sys/mman.h>
23d5a8f07cSbellard #endif
2454936004Sbellard 
25f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
266180a181Sbellard #include "cpu.h"
2763c91552SPaolo Bonzini #include "exec/exec-all.h"
28b67d9a52Sbellard #include "tcg.h"
29741da0d3SPaolo Bonzini #include "hw/qdev-core.h"
304485bd26SMichael S. Tsirkin #if !defined(CONFIG_USER_ONLY)
3147c8ca53SMarcel Apfelbaum #include "hw/boards.h"
3233c11879SPaolo Bonzini #include "hw/xen/xen.h"
334485bd26SMichael S. Tsirkin #endif
349c17d615SPaolo Bonzini #include "sysemu/kvm.h"
352ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
361de7afc9SPaolo Bonzini #include "qemu/timer.h"
371de7afc9SPaolo Bonzini #include "qemu/config-file.h"
3875a34036SAndreas Färber #include "qemu/error-report.h"
3953a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4053a5960aSpbrook #include <qemu.h>
41432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
42741da0d3SPaolo Bonzini #include "hw/hw.h"
43741da0d3SPaolo Bonzini #include "exec/memory.h"
44df43d49cSPaolo Bonzini #include "exec/ioport.h"
45741da0d3SPaolo Bonzini #include "sysemu/dma.h"
46741da0d3SPaolo Bonzini #include "exec/address-spaces.h"
479c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
486506e4f9SStefano Stabellini #include "trace.h"
4953a5960aSpbrook #endif
500d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
510dc3f44aSMike Day #include "qemu/rcu_queue.h"
524840f10eSJan Kiszka #include "qemu/main-loop.h"
535b6dd868SBlue Swirl #include "translate-all.h"
547615936eSPavel Dovgalyuk #include "sysemu/replay.h"
550cac1b66SBlue Swirl 
56022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
57220c3ebdSJuan Quintela #include "exec/ram_addr.h"
58508127e2SPaolo Bonzini #include "exec/log.h"
5967d95c15SAvi Kivity 
609dfeca7cSBharata B Rao #include "migration/vmstate.h"
619dfeca7cSBharata B Rao 
62b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
63794e8f30SMichael S. Tsirkin #ifndef _WIN32
64794e8f30SMichael S. Tsirkin #include "qemu/mmap-alloc.h"
65794e8f30SMichael S. Tsirkin #endif
66b35ba30fSMichael S. Tsirkin 
67db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
681196be37Sths 
6999773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
700dc3f44aSMike Day /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
710dc3f44aSMike Day  * are protected by the ramlist lock.
720dc3f44aSMike Day  */
730d53d9feSMike Day RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
7462152b8aSAvi Kivity 
7562152b8aSAvi Kivity static MemoryRegion *system_memory;
76309cb471SAvi Kivity static MemoryRegion *system_io;
7762152b8aSAvi Kivity 
78f6790af6SAvi Kivity AddressSpace address_space_io;
79f6790af6SAvi Kivity AddressSpace address_space_memory;
802673a5daSAvi Kivity 
810844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
82acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
830e0df1e2SAvi Kivity 
847bd4f430SPaolo Bonzini /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
857bd4f430SPaolo Bonzini #define RAM_PREALLOC   (1 << 0)
867bd4f430SPaolo Bonzini 
87dbcb8981SPaolo Bonzini /* RAM is mmap-ed with MAP_SHARED */
88dbcb8981SPaolo Bonzini #define RAM_SHARED     (1 << 1)
89dbcb8981SPaolo Bonzini 
9062be4e3aSMichael S. Tsirkin /* Only a portion of RAM (used_length) is actually used, and migrated.
9162be4e3aSMichael S. Tsirkin  * This used_length size can change across reboots.
9262be4e3aSMichael S. Tsirkin  */
9362be4e3aSMichael S. Tsirkin #define RAM_RESIZEABLE (1 << 2)
9462be4e3aSMichael S. Tsirkin 
95e2eef170Spbrook #endif
969fa3e853Sbellard 
97bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
986a00d601Sbellard /* current CPU in the current thread. It is only valid inside
996a00d601Sbellard    cpu_exec() */
100f240eb6fSPaolo Bonzini __thread CPUState *current_cpu;
1012e70f6efSpbrook /* 0 = Do not count executed instructions.
102bf20dc07Sths    1 = Precise instruction counting.
1032e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1045708fc66SPaolo Bonzini int use_icount;
1056a00d601Sbellard 
106e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1074346ae3eSAvi Kivity 
typedef struct PhysPageEntry PhysPageEntry;

/* One slot of the multi-level physical page map.  The two bit-fields pack
 * into a single uint32_t; PHYS_MAP_NODE_NIL is the all-ones 'ptr' value.
 */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
1161db8abb1SPaolo Bonzini 
1178b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
1188b795765SMichael S. Tsirkin 
11903f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
12057271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
12103f49957SPaolo Bonzini 
122026736ceSMichael S. Tsirkin #define P_L2_BITS 9
12303f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
12403f49957SPaolo Bonzini 
12503f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
12603f49957SPaolo Bonzini 
12703f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1280475d94fSPaolo Bonzini 
/* Backing storage for an AddressSpaceDispatch: the radix-tree node pool and
 * the array of MemoryRegionSections the leaves point into.  Carries an
 * rcu_head so the whole map can be reclaimed after a grace period.
 */
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;        /* sections currently in use */
    unsigned sections_nb_alloc;  /* allocated capacity of 'sections' */
    unsigned nodes_nb;           /* nodes currently in use */
    unsigned nodes_nb_alloc;     /* allocated capacity of 'nodes' */
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
13953cb28cbSMarcel Apfelbaum 
struct AddressSpaceDispatch {
    struct rcu_head rcu;  /* dispatch tables are RCU-reclaimed */

    /* Cache of the section returned by the last lookup; read/written with
     * atomic_read/atomic_set in address_space_lookup_region().
     */
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;     /* the AddressSpace this dispatch serves */
};
1511db8abb1SPaolo Bonzini 
/* Byte offset of 'addr' within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Splits a single target page into per-byte-offset sub-sections, for pages
 * shared by more than one MemoryRegion.
 */
typedef struct subpage_t {
    MemoryRegion iomem;  /* region registered on behalf of the whole page */
    AddressSpace *as;
    hwaddr base;         /* page base address within the address space */
    uint16_t sub_section[TARGET_PAGE_SIZE];  /* section index per byte offset */
} subpage_t;
15990260c6cSJan Kiszka 
160b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
161b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
162b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
163b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1645312bd8bSAvi Kivity 
165e2eef170Spbrook static void io_mem_init(void);
16662152b8aSAvi Kivity static void memory_map_init(void);
16709daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
168e2eef170Spbrook 
1691ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
17032857f4dSPeter Maydell 
17132857f4dSPeter Maydell /**
17232857f4dSPeter Maydell  * CPUAddressSpace: all the information a CPU needs about an AddressSpace
17332857f4dSPeter Maydell  * @cpu: the CPU whose AddressSpace this is
17432857f4dSPeter Maydell  * @as: the AddressSpace itself
17532857f4dSPeter Maydell  * @memory_dispatch: its dispatch pointer (cached, RCU protected)
17632857f4dSPeter Maydell  * @tcg_as_listener: listener for tracking changes to the AddressSpace
17732857f4dSPeter Maydell  */
17832857f4dSPeter Maydell struct CPUAddressSpace {
17932857f4dSPeter Maydell     CPUState *cpu;
18032857f4dSPeter Maydell     AddressSpace *as;
18132857f4dSPeter Maydell     struct AddressSpaceDispatch *memory_dispatch;
18232857f4dSPeter Maydell     MemoryListener tcg_as_listener;
18332857f4dSPeter Maydell };
18432857f4dSPeter Maydell 
1856658ffb8Spbrook #endif
18654936004Sbellard 
1876d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
188d6f2ea22SAvi Kivity 
18953cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
190f7bf5461SAvi Kivity {
19153cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
19253cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
19353cb28cbSMarcel Apfelbaum         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
19453cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
195f7bf5461SAvi Kivity     }
196f7bf5461SAvi Kivity }
197f7bf5461SAvi Kivity 
/* Take one node from the map's pre-reserved pool and initialise all of its
 * P_L2_SIZE entries: a leaf node's entries point at the unassigned section,
 * an interior node's entries are NIL with skip = 1.  Returns the new node's
 * index.  Callers must have called phys_map_node_reserve() first (second
 * assert below enforces this).
 */
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);  /* pool must not be exhausted */

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
217d6f2ea22SAvi Kivity 
/* Recursively populate the radix tree under *lp so that the page range
 * [*index, *index + *nb) maps to section number 'leaf'.  'level' is the
 * current depth (0 = bottom).  *index and *nb are consumed in place as
 * entries are filled, so both the loop below and the recursion continue
 * with whatever remains of the range.
 */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    /* First touch of this subtree: allocate its node. */
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* The range fully covers this entry: turn it into a leaf that
             * points straight at the section.
             */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: descend one level. */
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
2435cd2c5b6SRichard Henderson 
/* Map the page range [index, index + nb) of dispatch 'd' to section number
 * 'leaf' in its multi-level phys_map.
 */
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
25392e873b9Sbellard 
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 * NOTE(review): the 'compacted' bitmap is only passed through the recursion
 * and never read or written here.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;  /* index of the last non-NIL child */
    int valid = 0;                   /* number of non-NIL children */
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            /* Interior child: compact its subtree first (bottom-up). */
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
306b35ba30fSMichael S. Tsirkin 
/* Compact the whole radix tree of dispatch 'd' by merging single-child
 * interior nodes into their parents (see phys_page_compact).  A root with
 * skip == 0 is already a leaf, so there is nothing to do.
 */
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    /* Scratch bitmap; currently unused by phys_page_compact(). */
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
315b35ba30fSMichael S. Tsirkin 
31629cb533dSFam Zheng static inline bool section_covers_addr(const MemoryRegionSection *section,
31729cb533dSFam Zheng                                        hwaddr addr)
31829cb533dSFam Zheng {
31929cb533dSFam Zheng     /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
32029cb533dSFam Zheng      * the section must cover the entire address space.
32129cb533dSFam Zheng      */
32229cb533dSFam Zheng     return section->size.hi ||
32329cb533dSFam Zheng            range_covers_byte(section->offset_within_address_space,
32429cb533dSFam Zheng                              section->size.lo, addr);
32529cb533dSFam Zheng }
32629cb533dSFam Zheng 
/* Walk the radix tree rooted at 'lp' down to the leaf for 'addr' and return
 * the matching MemoryRegionSection, or the PHYS_SECTION_UNASSIGNED section
 * when nothing maps the address.
 */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* 'skip' is how many levels a (possibly compacted) entry jumps over;
     * skip == 0 marks a leaf and ends the walk.
     */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* Verify the candidate section really covers 'addr' before using it. */
    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
348f3705d53SAvi Kivity 
349e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
350e5548617SBlue Swirl {
3512a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
352e5548617SBlue Swirl         && mr != &io_mem_watch;
353e5548617SBlue Swirl }
354149f54b5SPaolo Bonzini 
/* Called from RCU critical section */
/* Find the MemoryRegionSection covering 'addr' in dispatch 'd'.  A
 * most-recently-used section is cached in d->mru_section (accessed with
 * atomic_read/atomic_set since concurrent lookups may race on it).  With
 * resolve_subpage, a subpage container is resolved down to the section for
 * the exact byte offset.
 */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    /* Fast path: reuse the cached section if it still covers 'addr'. */
    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        /* The page is split into sub-sections: pick the one for addr. */
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
3819f029603SJan Kiszka 
/* Called from RCU critical section */
/* Look up the section for 'addr' in dispatch 'd'.  On return, *xlat is the
 * offset of 'addr' within the section's MemoryRegion.  For RAM, *plen is
 * clamped so the access stays within the section; for MMIO it is left
 * unclamped on purpose (see the comment below).
 */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
41790260c6cSJan Kiszka 
/* Called from RCU critical section */
/* Translate 'addr' in 'as' to a (MemoryRegion, offset) pair, iterating
 * through any IOMMUs on the path.  *plen is shrunk to what fits inside the
 * translated region/IOMMU granule.  On an IOMMU permission fault, the
 * unassigned region is returned.
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        /* A region without IOMMU ops ends the walk. */
        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        /* Combine the translated page with the untranslated low bits. */
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Clamp to the IOMMU translation granule. */
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        /* Continue the lookup in the IOMMU's target address space. */
        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        /* Under Xen, clamp direct accesses to the current page —
         * presumably because the mapcache works page-by-page (see
         * xen-mapcache); confirm against sysemu/xen-mapcache.h.
         */
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
45690260c6cSJan Kiszka 
/* Called from RCU critical section */
/* TLB-fill variant of translation: look up 'addr' in CPU address space
 * number 'asidx', without resolving subpage containers (resolve_subpage is
 * false).  IOMMU-backed sections are not expected here (asserted).
 */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
4709fa3e853Sbellard #endif
471fd6ce8f6Sbellard 
472b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4739656f324Spbrook 
/* VMState post_load callback for the common CPU state: sanitise legacy
 * interrupt bits and invalidate cached translations.
 */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);  /* cached TLB contents are stale after a load */

    return 0;
}
485e7f4eff7SJuan Quintela 
4866c3bff0eSPavel Dovgaluk static int cpu_common_pre_load(void *opaque)
4876c3bff0eSPavel Dovgaluk {
4886c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4896c3bff0eSPavel Dovgaluk 
490adee6424SPaolo Bonzini     cpu->exception_index = -1;
4916c3bff0eSPavel Dovgaluk 
4926c3bff0eSPavel Dovgaluk     return 0;
4936c3bff0eSPavel Dovgaluk }
4946c3bff0eSPavel Dovgaluk 
4956c3bff0eSPavel Dovgaluk static bool cpu_common_exception_index_needed(void *opaque)
4966c3bff0eSPavel Dovgaluk {
4976c3bff0eSPavel Dovgaluk     CPUState *cpu = opaque;
4986c3bff0eSPavel Dovgaluk 
499adee6424SPaolo Bonzini     return tcg_enabled() && cpu->exception_index != -1;
5006c3bff0eSPavel Dovgaluk }
5016c3bff0eSPavel Dovgaluk 
/* Optional subsection: present in the stream only when the .needed
 * callback above returns true (TCG with a pending exception).
 */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
5126c3bff0eSPavel Dovgaluk 
513bac05aa9SAndrey Smetanin static bool cpu_common_crash_occurred_needed(void *opaque)
514bac05aa9SAndrey Smetanin {
515bac05aa9SAndrey Smetanin     CPUState *cpu = opaque;
516bac05aa9SAndrey Smetanin 
517bac05aa9SAndrey Smetanin     return cpu->crash_occurred;
518bac05aa9SAndrey Smetanin }
519bac05aa9SAndrey Smetanin 
/* Optional subsection: emitted only when the guest crash flag is set. */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
530bac05aa9SAndrey Smetanin 
/* Migration description for the state every CPU model shares.  The
 * subsections listed below are included only when their .needed callbacks
 * return true.
 */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
5481a1562f5SAndreas Färber 
5499656f324Spbrook #endif
5509656f324Spbrook 
55138d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
552950f1472SGlauber Costa {
553bdc44640SAndreas Färber     CPUState *cpu;
554950f1472SGlauber Costa 
555bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
55655e5c285SAndreas Färber         if (cpu->cpu_index == index) {
557bdc44640SAndreas Färber             return cpu;
55855e5c285SAndreas Färber         }
559950f1472SGlauber Costa     }
560950f1472SGlauber Costa 
561bdc44640SAndreas Färber     return NULL;
562950f1472SGlauber Costa }
563950f1472SGlauber Costa 
56409daed84SEdgar E. Iglesias #if !defined(CONFIG_USER_ONLY)
/* Install AddressSpace 'as' as address space number 'asidx' of 'cpu'.
 * Allocates the cpu_ases array on first use; with TCG, also registers a
 * memory listener so the cached dispatch pointer gets refreshed on commit.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
592651a5bc0SPeter Maydell 
/* Return the AddressSpace registered as index 'asidx' for 'cpu'.
 * No bounds checking: 'asidx' must have been set up via
 * cpu_address_space_init().
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
59809daed84SEdgar E. Iglesias #endif
59909daed84SEdgar E. Iglesias 
600b7bca733SBharata B Rao #ifndef CONFIG_USER_ONLY
601b7bca733SBharata B Rao static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
602b7bca733SBharata B Rao 
603b7bca733SBharata B Rao static int cpu_get_free_index(Error **errp)
604b7bca733SBharata B Rao {
605b7bca733SBharata B Rao     int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
606b7bca733SBharata B Rao 
607b7bca733SBharata B Rao     if (cpu >= MAX_CPUMASK_BITS) {
608b7bca733SBharata B Rao         error_setg(errp, "Trying to use more CPUs than max of %d",
609b7bca733SBharata B Rao                    MAX_CPUMASK_BITS);
610b7bca733SBharata B Rao         return -1;
611b7bca733SBharata B Rao     }
612b7bca733SBharata B Rao 
613b7bca733SBharata B Rao     bitmap_set(cpu_index_map, cpu, 1);
614b7bca733SBharata B Rao     return cpu;
615b7bca733SBharata B Rao }
616b7bca733SBharata B Rao 
6171c59eb39SBharata B Rao static void cpu_release_index(CPUState *cpu)
618b7bca733SBharata B Rao {
619b7bca733SBharata B Rao     bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
620b7bca733SBharata B Rao }
621b7bca733SBharata B Rao #else
622b7bca733SBharata B Rao 
623b7bca733SBharata B Rao static int cpu_get_free_index(Error **errp)
624b7bca733SBharata B Rao {
625b7bca733SBharata B Rao     CPUState *some_cpu;
626b7bca733SBharata B Rao     int cpu_index = 0;
627b7bca733SBharata B Rao 
628b7bca733SBharata B Rao     CPU_FOREACH(some_cpu) {
629b7bca733SBharata B Rao         cpu_index++;
630b7bca733SBharata B Rao     }
631b7bca733SBharata B Rao     return cpu_index;
632b7bca733SBharata B Rao }
633b7bca733SBharata B Rao 
6341c59eb39SBharata B Rao static void cpu_release_index(CPUState *cpu)
635b7bca733SBharata B Rao {
6361c59eb39SBharata B Rao     return;
637b7bca733SBharata B Rao }
638b7bca733SBharata B Rao #endif
639b7bca733SBharata B Rao 
/* Undo cpu_exec_init(): unlink @cpu from the global cpus list, release
 * its cpu_index and unregister any vmstate that was registered for it.
 * Safe to call more than once; subsequent calls are no-ops.
 */
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    /* Mark the index as freed so a second call takes the early return. */
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    /* Mirror the vmstate_register() calls made in cpu_exec_init(). */
    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
6691c59eb39SBharata B Rao 
/* Register a newly created @cpu: allocate its cpu_index, link it onto
 * the global cpus list and, for softmmu, set up its memory link property
 * and register its vmstate.  On failure, sets *errp and leaves the CPU
 * unregistered.
 */
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    /* Address spaces are wired up later via cpu_address_space_init(). */
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc; /* cc is only consumed by the softmmu branch below */
    cpu_list_unlock();
#else
    /* Register generic CPU state unless the device provides its own vmsd,
     * plus any class-specific vmstate.  Undone by cpu_exec_exit().
     */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
720fd6ce8f6Sbellard 
/* Throw away any cached translation covering @pc so that a newly
 * inserted or removed breakpoint takes effect.
 */
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* User mode: guest virtual addresses map directly, so invalidate
     * the one-byte range at pc without any address translation.
     */
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    /* phys == -1 means pc is currently unmapped: nothing to invalidate. */
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
738d720b93dSbellard 
#if defined(CONFIG_USER_ONLY)
/* Watchpoints are not supported in user-mode emulation: the insert and
 * remove entry points report -ENOSYS and the bulk-removal stubs are
 * no-ops, keeping the API uniform for callers such as the gdbstub.
 */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
760c527ee8fSPaul Brook #else
7616658ffb8Spbrook /* Add a watchpoint.  */
76275a34036SAndreas Färber int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
763a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
7646658ffb8Spbrook {
765c0ce998eSaliguori     CPUWatchpoint *wp;
7666658ffb8Spbrook 
76705068c0dSPeter Maydell     /* forbid ranges which are empty or run off the end of the address space */
76807e2863dSMax Filippov     if (len == 0 || (addr + len - 1) < addr) {
76975a34036SAndreas Färber         error_report("tried to set invalid watchpoint at %"
77075a34036SAndreas Färber                      VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
771b4051334Saliguori         return -EINVAL;
772b4051334Saliguori     }
7737267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
7746658ffb8Spbrook 
775a1d1bb31Saliguori     wp->vaddr = addr;
77605068c0dSPeter Maydell     wp->len = len;
777a1d1bb31Saliguori     wp->flags = flags;
778a1d1bb31Saliguori 
7792dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
780ff4700b0SAndreas Färber     if (flags & BP_GDB) {
781ff4700b0SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
782ff4700b0SAndreas Färber     } else {
783ff4700b0SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
784ff4700b0SAndreas Färber     }
785a1d1bb31Saliguori 
78631b030d4SAndreas Färber     tlb_flush_page(cpu, addr);
787a1d1bb31Saliguori 
788a1d1bb31Saliguori     if (watchpoint)
789a1d1bb31Saliguori         *watchpoint = wp;
790a1d1bb31Saliguori     return 0;
7916658ffb8Spbrook }
7926658ffb8Spbrook 
793a1d1bb31Saliguori /* Remove a specific watchpoint.  */
79475a34036SAndreas Färber int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
795a1d1bb31Saliguori                           int flags)
7966658ffb8Spbrook {
797a1d1bb31Saliguori     CPUWatchpoint *wp;
7986658ffb8Spbrook 
799ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
80005068c0dSPeter Maydell         if (addr == wp->vaddr && len == wp->len
8016e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
80275a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
8036658ffb8Spbrook             return 0;
8046658ffb8Spbrook         }
8056658ffb8Spbrook     }
806a1d1bb31Saliguori     return -ENOENT;
8076658ffb8Spbrook }
8086658ffb8Spbrook 
809a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
81075a34036SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
811a1d1bb31Saliguori {
812ff4700b0SAndreas Färber     QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
8137d03f82fSedgar_igl 
81431b030d4SAndreas Färber     tlb_flush_page(cpu, watchpoint->vaddr);
815a1d1bb31Saliguori 
8167267c094SAnthony Liguori     g_free(watchpoint);
8177d03f82fSedgar_igl }
8187d03f82fSedgar_igl 
819a1d1bb31Saliguori /* Remove all matching watchpoints.  */
82075a34036SAndreas Färber void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
821a1d1bb31Saliguori {
822c0ce998eSaliguori     CPUWatchpoint *wp, *next;
823a1d1bb31Saliguori 
824ff4700b0SAndreas Färber     QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
82575a34036SAndreas Färber         if (wp->flags & mask) {
82675a34036SAndreas Färber             cpu_watchpoint_remove_by_ref(cpu, wp);
82775a34036SAndreas Färber         }
828a1d1bb31Saliguori     }
829c0ce998eSaliguori }
83005068c0dSPeter Maydell 
83105068c0dSPeter Maydell /* Return true if this watchpoint address matches the specified
83205068c0dSPeter Maydell  * access (ie the address range covered by the watchpoint overlaps
83305068c0dSPeter Maydell  * partially or completely with the address range covered by the
83405068c0dSPeter Maydell  * access).
83505068c0dSPeter Maydell  */
83605068c0dSPeter Maydell static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
83705068c0dSPeter Maydell                                                   vaddr addr,
83805068c0dSPeter Maydell                                                   vaddr len)
83905068c0dSPeter Maydell {
84005068c0dSPeter Maydell     /* We know the lengths are non-zero, but a little caution is
84105068c0dSPeter Maydell      * required to avoid errors in the case where the range ends
84205068c0dSPeter Maydell      * exactly at the top of the address space and so addr + len
84305068c0dSPeter Maydell      * wraps round to zero.
84405068c0dSPeter Maydell      */
84505068c0dSPeter Maydell     vaddr wpend = wp->vaddr + wp->len - 1;
84605068c0dSPeter Maydell     vaddr addrend = addr + len - 1;
84705068c0dSPeter Maydell 
84805068c0dSPeter Maydell     return !(addr > wpend || wp->vaddr > addrend);
84905068c0dSPeter Maydell }
85005068c0dSPeter Maydell 
851c527ee8fSPaul Brook #endif
852a1d1bb31Saliguori 
853a1d1bb31Saliguori /* Add a breakpoint.  */
854b3310ab3SAndreas Färber int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
855a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
8564c3a88a2Sbellard {
857c0ce998eSaliguori     CPUBreakpoint *bp;
8584c3a88a2Sbellard 
8597267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
8604c3a88a2Sbellard 
861a1d1bb31Saliguori     bp->pc = pc;
862a1d1bb31Saliguori     bp->flags = flags;
863a1d1bb31Saliguori 
8642dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
86500b941e5SAndreas Färber     if (flags & BP_GDB) {
866f0c3c505SAndreas Färber         QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
86700b941e5SAndreas Färber     } else {
868f0c3c505SAndreas Färber         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
86900b941e5SAndreas Färber     }
870d720b93dSbellard 
871f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, pc);
872a1d1bb31Saliguori 
87300b941e5SAndreas Färber     if (breakpoint) {
874a1d1bb31Saliguori         *breakpoint = bp;
87500b941e5SAndreas Färber     }
8764c3a88a2Sbellard     return 0;
8774c3a88a2Sbellard }
8784c3a88a2Sbellard 
879a1d1bb31Saliguori /* Remove a specific breakpoint.  */
880b3310ab3SAndreas Färber int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
881a1d1bb31Saliguori {
882a1d1bb31Saliguori     CPUBreakpoint *bp;
883a1d1bb31Saliguori 
884f0c3c505SAndreas Färber     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
885a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
886b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
887a1d1bb31Saliguori             return 0;
8887d03f82fSedgar_igl         }
889a1d1bb31Saliguori     }
890a1d1bb31Saliguori     return -ENOENT;
8917d03f82fSedgar_igl }
8927d03f82fSedgar_igl 
893a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
894b3310ab3SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
8954c3a88a2Sbellard {
896f0c3c505SAndreas Färber     QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
897f0c3c505SAndreas Färber 
898f0c3c505SAndreas Färber     breakpoint_invalidate(cpu, breakpoint->pc);
899a1d1bb31Saliguori 
9007267c094SAnthony Liguori     g_free(breakpoint);
901a1d1bb31Saliguori }
902a1d1bb31Saliguori 
903a1d1bb31Saliguori /* Remove all matching breakpoints. */
904b3310ab3SAndreas Färber void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
905a1d1bb31Saliguori {
906c0ce998eSaliguori     CPUBreakpoint *bp, *next;
907a1d1bb31Saliguori 
908f0c3c505SAndreas Färber     QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
909b3310ab3SAndreas Färber         if (bp->flags & mask) {
910b3310ab3SAndreas Färber             cpu_breakpoint_remove_by_ref(cpu, bp);
911b3310ab3SAndreas Färber         }
912c0ce998eSaliguori     }
9134c3a88a2Sbellard }
9144c3a88a2Sbellard 
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            /* KVM implements single-step via its guest-debug interface. */
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
930c33a346eSbellard 
/* Report a fatal guest/emulation error: print the formatted message and
 * CPU state to stderr (and to the debug log when it is separate), then
 * abort().  This function never returns.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* Second copy of the va_list: the message is formatted twice
     * (stderr and the log), and a va_list is consumed by use.
     */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so abort() really
         * terminates even if the guest installed its own handler.
         */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
9637501267eSbellard 
9640124311eSbellard #if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
/* Return the RAMBlock containing guest ram address @addr, checking the
 * MRU cache before walking the block list.  Aborts if @addr lies in no
 * registered block.
 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    /* Unsigned subtraction doubles as a lower-bound check: if addr is
     * below block->offset the difference wraps and exceeds max_length.
     */
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
1003041603feSPaolo Bonzini 
/* Reset the dirty-tracking state in every CPU's TLB for the guest ram
 * range [start, start + length); the range must lie within one RAMBlock.
 */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    /* Round the byte range out to whole target pages. */
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    /* Caller contract: start and end-1 are in the same RAMBlock. */
    assert(block == qemu_get_ram_block(end - 1));
    /* Translate to a host virtual address, which is what the TLB stores. */
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
1023d24981d3SJuan Quintela 
/* Note: start and end must be within the same ram block.  */
/* Atomically test-and-clear the dirty bits of @client for the page range
 * covering [start, start + length).  Returns true if any page was dirty;
 * in that case the TCG TLBs are resynchronized as well.
 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    /* Convert the byte range to [page, end) in page-number units. */
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        /* Process at most up to the end of the current bitmap block. */
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        /* TCG TLBs cache dirty status, so they must be brought in sync. */
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
10621ccde1cbSbellard 
/* Called from RCU critical section */
/* Compute the iotlb value for a TLB entry mapping @vaddr to @paddr within
 * @section.  For RAM the value is the ram address ORed with a NOTDIRTY or
 * ROM marker section; for MMIO it is the section's index in the dispatch
 * table plus the offset.  Pages overlapping a watchpoint are additionally
 * forced through the slow path by setting TLB_MMIO in *address.
 */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        /* MMIO: encode the section index within the current dispatch map. */
        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
11059fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
110633417e70Sbellard 
1107e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
11088da3ff18Spbrook 
/* Forward declarations for the subpage machinery defined further down. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

/* Allocator used for guest RAM; defaults to anonymous mmap'd memory. */
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
112591138037SMarkus Armbruster 
/* Append a copy of @section to @map's section table, taking a reference
 * on the underlying MemoryRegion (dropped in phys_section_destroy).
 * Returns the new section's index.
 */
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        /* Grow geometrically (minimum 16) to amortise reallocations. */
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
11445312bd8bSAvi Kivity 
/* Drop the reference taken on @mr by phys_section_add(); if the region
 * belongs to a subpage container, free that container too.
 */
static void phys_section_destroy(MemoryRegion *mr)
{
    /* Read mr->subpage before the unref: presumably the unref may drop
     * the last reference, after which mr must not be dereferenced.
     * NOTE(review): confirm lifetime assumption against memory_region_unref.
     */
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
1157058bc4b5SPaolo Bonzini 
11586092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
11595312bd8bSAvi Kivity {
11609affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
11619affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1162058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1163058bc4b5SPaolo Bonzini     }
11649affd6fcSPaolo Bonzini     g_free(map->sections);
11659affd6fcSPaolo Bonzini     g_free(map->nodes);
11665312bd8bSAvi Kivity }
11675312bd8bSAvi Kivity 
/* Install @section, which does not cover a whole target page, into the
 * dispatch table @d by routing its page through a subpage_t container.
 */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    /* Section describing the subpage container itself, covering the
     * whole target page at @base.
     */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must be unassigned or already backed by a subpage. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First sub-page mapping on this page: create the container and
         * point the page-table entry at it.
         */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register the section over its byte range within the page. */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
11970f0cb164SAvi Kivity 
11980f0cb164SAvi Kivity 
1199052e87b0SPaolo Bonzini static void register_multipage(AddressSpaceDispatch *d,
1200052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
120133417e70Sbellard {
1202a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
120353cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
1204052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1205052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
1206dd81124bSAvi Kivity 
1207733d5ef5SPaolo Bonzini     assert(num_pages);
1208733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
120933417e70Sbellard }
121033417e70Sbellard 
/*
 * MemoryListener callback: enter @section into the address space's
 * next dispatch structure.  The section is split into an unaligned
 * head fragment, a run of whole pages, and an unaligned tail; partial
 * pages go through register_subpage(), whole-page runs through
 * register_multipage().
 */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned start: register the fragment up to the next page
         * boundary as a subpage. */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        /* Aligned start: nothing consumed yet. */
        now.size = int128_zero();
    }
    /* Loop invariant: 'now.size' is what was registered in the previous
     * step; stop once 'remain' shrinks to exactly that amount. */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Less than one page left: tail fragment as a subpage. */
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            /* At least a page left but the offset is unaligned: consume
             * exactly one page through a subpage. */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Page-aligned run: map all whole pages at once (round size
             * down to a page multiple). */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
12430f0cb164SAvi Kivity 
/*
 * Flush any MMIO writes the accelerator has batched in its coalesced-MMIO
 * buffer.  No-op unless KVM is the active accelerator.
 */
void qemu_flush_coalesced_mmio_buffer(void)
{
    /* Braces added: QEMU coding style requires them even for
     * single-statement bodies. */
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
124962a2744cSSheng Yang 
/* Take the ram_list mutex, serializing writers of the RAM block list.
 * (Readers in this file traverse the list under RCU instead.) */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
1254b2a8658eSUmesh Deshpande 
/* Release the ram_list mutex taken by qemu_mutex_lock_ramlist(). */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1259b2a8658eSUmesh Deshpande 
1260e1e84ba0SMarkus Armbruster #ifdef __linux__
/*
 * Allocate @memory bytes of guest RAM for @block, backed by a file
 * under (or at) @path.
 *
 * @path may name an existing file (used as-is), a nonexistent file
 * (created), or a directory (a unique, immediately-unlinked temp file
 * is created inside it).  On success the backing fd is stored in
 * block->fd and the mapped area is returned; on failure NULL is
 * returned, @errp is set, and any file we created is unlinked.
 */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;   /* we created the file at @path */
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                /* Unlink immediately: the open fd keeps the backing
                 * file alive, and nothing stale is left on exit. */
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    /* Hugetlbfs files have page sizes larger than the host default;
     * align the region accordingly. */
    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        /* -mem-prealloc: fault in all pages up front. */
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1373c902760fSMarcelo Tosatti #endif
1374c902760fSMarcelo Tosatti 
13750dc3f44aSMike Day /* Called with the ramlist lock held.  */
1376d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1377d17b5288SAlex Williamson {
137804b16653SAlex Williamson     RAMBlock *block, *next_block;
13793e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
138004b16653SAlex Williamson 
138149cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
138249cd9ac6SStefan Hajnoczi 
13830dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
138404b16653SAlex Williamson         return 0;
13850d53d9feSMike Day     }
138604b16653SAlex Williamson 
13870dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1388f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
138904b16653SAlex Williamson 
139062be4e3aSMichael S. Tsirkin         end = block->offset + block->max_length;
139104b16653SAlex Williamson 
13920dc3f44aSMike Day         QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
139304b16653SAlex Williamson             if (next_block->offset >= end) {
139404b16653SAlex Williamson                 next = MIN(next, next_block->offset);
139504b16653SAlex Williamson             }
139604b16653SAlex Williamson         }
139704b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
139804b16653SAlex Williamson             offset = end;
139904b16653SAlex Williamson             mingap = next - end;
140004b16653SAlex Williamson         }
140104b16653SAlex Williamson     }
14023e837b2cSAlex Williamson 
14033e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
14043e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
14053e837b2cSAlex Williamson                 (uint64_t)size);
14063e837b2cSAlex Williamson         abort();
14073e837b2cSAlex Williamson     }
14083e837b2cSAlex Williamson 
140904b16653SAlex Williamson     return offset;
141004b16653SAlex Williamson }
141104b16653SAlex Williamson 
1412652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
141304b16653SAlex Williamson {
1414d17b5288SAlex Williamson     RAMBlock *block;
1415d17b5288SAlex Williamson     ram_addr_t last = 0;
1416d17b5288SAlex Williamson 
14170dc3f44aSMike Day     rcu_read_lock();
14180dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
141962be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
14200d53d9feSMike Day     }
14210dc3f44aSMike Day     rcu_read_unlock();
1422d17b5288SAlex Williamson     return last;
1423d17b5288SAlex Williamson }
1424d17b5288SAlex Williamson 
1425ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1426ddb97f1dSJason Baron {
1427ddb97f1dSJason Baron     int ret;
1428ddb97f1dSJason Baron 
1429ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
143047c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1431ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1432ddb97f1dSJason Baron         if (ret) {
1433ddb97f1dSJason Baron             perror("qemu_madvise");
1434ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1435ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1436ddb97f1dSJason Baron         }
1437ddb97f1dSJason Baron     }
1438ddb97f1dSJason Baron }
1439ddb97f1dSJason Baron 
/* Return the block's identifier string (empty until qemu_ram_set_idstr()
 * has been called, or after qemu_ram_unset_idstr()). */
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}
1444422148d3SDr. David Alan Gilbert 
1445ae3a7047SMike Day /* Called with iothread lock held.  */
1446fa53a0e5SGonglei void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
144720cfe881SHu Tao {
1448fa53a0e5SGonglei     RAMBlock *block;
144920cfe881SHu Tao 
1450c5705a77SAvi Kivity     assert(new_block);
1451c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
145284b89d78SCam Macdonell 
145309e5ab63SAnthony Liguori     if (dev) {
145409e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
145584b89d78SCam Macdonell         if (id) {
145684b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
14577267c094SAnthony Liguori             g_free(id);
145884b89d78SCam Macdonell         }
145984b89d78SCam Macdonell     }
146084b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
146184b89d78SCam Macdonell 
1462ab0a9956SGonglei     rcu_read_lock();
14630dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1464fa53a0e5SGonglei         if (block != new_block &&
1465fa53a0e5SGonglei             !strcmp(block->idstr, new_block->idstr)) {
146684b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
146784b89d78SCam Macdonell                     new_block->idstr);
146884b89d78SCam Macdonell             abort();
146984b89d78SCam Macdonell         }
147084b89d78SCam Macdonell     }
14710dc3f44aSMike Day     rcu_read_unlock();
1472c5705a77SAvi Kivity }
1473c5705a77SAvi Kivity 
1474ae3a7047SMike Day /* Called with iothread lock held.  */
/* Clear @block's identifier string so the name can be reused. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
148520cfe881SHu Tao 
14868490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
14878490fc78SLuiz Capitulino {
148875cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
14898490fc78SLuiz Capitulino         /* disabled by the user */
14908490fc78SLuiz Capitulino         return 0;
14918490fc78SLuiz Capitulino     }
14928490fc78SLuiz Capitulino 
14938490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
14948490fc78SLuiz Capitulino }
14958490fc78SLuiz Capitulino 
149662be4e3aSMichael S. Tsirkin /* Only legal before guest might have detected the memory size: e.g. on
149762be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
149862be4e3aSMichael S. Tsirkin  *
149962be4e3aSMichael S. Tsirkin  * As memory core doesn't know how is memory accessed, it is up to
150062be4e3aSMichael S. Tsirkin  * resize callback to update device state and/or add assertions to detect
150162be4e3aSMichael S. Tsirkin  * misuse, if necessary.
150262be4e3aSMichael S. Tsirkin  */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    /* Block sizes are tracked at host-page granularity. */
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        /* Nothing to do. */
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        /* Fixed-size block: any size change is an error. */
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        /* Cannot grow beyond the maximum reserved at allocation time. */
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Clear dirty bits for the old extent, update the length, then mark
     * the new extent fully dirty (all clients) — order matters so no
     * range is left with stale dirty state. */
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        /* Notify the owner via the callback registered at allocation. */
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
153962be4e3aSMichael S. Tsirkin 
/*
 * Grow the per-client dirty-memory bitmaps so they cover new_ram_size
 * pages.  Each DirtyMemoryBlocks array is replaced wholesale and
 * published with RCU; old bitmaps are copied over and freed after a
 * grace period.  Called with ram_list.mutex held.
 */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        /* Flexible-array allocation: header plus new_num_blocks slots. */
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            /* Carry over the existing bitmap pointers. */
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        /* Fresh (all-clean) bitmaps for the newly covered range. */
        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        /* Publish the new array before freeing the old one. */
        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            /* Defer the free until concurrent RCU readers are done. */
            g_free_rcu(old_blocks, rcu);
        }
    }
}
15805b82b703SStefan Hajnoczi 
/*
 * Register @new_block in the global RAM block list, allocating its host
 * memory if the caller did not supply it.  On failure @errp is set and
 * the block is left unregistered (the caller frees it).
 *
 * Writers take the ramlist mutex; list readers elsewhere use RCU, so
 * insertion uses the RCU-safe QLIST variants and publishes before
 * bumping ram_list.version.
 */
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    /* Best-fit gap in the ram_addr_t space. */
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            /* Under Xen, RAM is allocated through the hypervisor. */
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Extend dirty-page tracking if this block raised the total size. */
    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    /* New RAM starts out fully dirty for all clients. */
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
1659e9a1ab19Sbellard 
16600b183fc8SPaolo Bonzini #ifdef __linux__
1661528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1662dbcb8981SPaolo Bonzini                                    bool share, const char *mem_path,
16637f56e740SPaolo Bonzini                                    Error **errp)
1664e1c57ab8SPaolo Bonzini {
1665e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1666ef701d7bSHu Tao     Error *local_err = NULL;
1667e1c57ab8SPaolo Bonzini 
1668e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
16697f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
1670528f46afSFam Zheng         return NULL;
1671e1c57ab8SPaolo Bonzini     }
1672e1c57ab8SPaolo Bonzini 
1673e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1674e1c57ab8SPaolo Bonzini         /*
1675e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1676e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1677e1c57ab8SPaolo Bonzini          * a hook there.
1678e1c57ab8SPaolo Bonzini          */
16797f56e740SPaolo Bonzini         error_setg(errp,
16807f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
1681528f46afSFam Zheng         return NULL;
1682e1c57ab8SPaolo Bonzini     }
1683e1c57ab8SPaolo Bonzini 
16844ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
1685e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1686e1c57ab8SPaolo Bonzini     new_block->mr = mr;
16879b8424d5SMichael S. Tsirkin     new_block->used_length = size;
16889b8424d5SMichael S. Tsirkin     new_block->max_length = size;
1689dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
16907f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
16917f56e740SPaolo Bonzini                                      mem_path, errp);
16927f56e740SPaolo Bonzini     if (!new_block->host) {
16937f56e740SPaolo Bonzini         g_free(new_block);
1694528f46afSFam Zheng         return NULL;
16957f56e740SPaolo Bonzini     }
16967f56e740SPaolo Bonzini 
1697528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1698ef701d7bSHu Tao     if (local_err) {
1699ef701d7bSHu Tao         g_free(new_block);
1700ef701d7bSHu Tao         error_propagate(errp, local_err);
1701528f46afSFam Zheng         return NULL;
1702ef701d7bSHu Tao     }
1703528f46afSFam Zheng     return new_block;
1704e1c57ab8SPaolo Bonzini }
17050b183fc8SPaolo Bonzini #endif
1706e1c57ab8SPaolo Bonzini 
170762be4e3aSMichael S. Tsirkin static
1708528f46afSFam Zheng RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
170962be4e3aSMichael S. Tsirkin                                   void (*resized)(const char*,
171062be4e3aSMichael S. Tsirkin                                                   uint64_t length,
171162be4e3aSMichael S. Tsirkin                                                   void *host),
171262be4e3aSMichael S. Tsirkin                                   void *host, bool resizeable,
1713ef701d7bSHu Tao                                   MemoryRegion *mr, Error **errp)
1714e1c57ab8SPaolo Bonzini {
1715e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1716ef701d7bSHu Tao     Error *local_err = NULL;
1717e1c57ab8SPaolo Bonzini 
17184ed023ceSDr. David Alan Gilbert     size = HOST_PAGE_ALIGN(size);
17194ed023ceSDr. David Alan Gilbert     max_size = HOST_PAGE_ALIGN(max_size);
1720e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1721e1c57ab8SPaolo Bonzini     new_block->mr = mr;
172262be4e3aSMichael S. Tsirkin     new_block->resized = resized;
17239b8424d5SMichael S. Tsirkin     new_block->used_length = size;
17249b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
172562be4e3aSMichael S. Tsirkin     assert(max_size >= size);
1726e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1727e1c57ab8SPaolo Bonzini     new_block->host = host;
1728e1c57ab8SPaolo Bonzini     if (host) {
17297bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1730e1c57ab8SPaolo Bonzini     }
173162be4e3aSMichael S. Tsirkin     if (resizeable) {
173262be4e3aSMichael S. Tsirkin         new_block->flags |= RAM_RESIZEABLE;
173362be4e3aSMichael S. Tsirkin     }
1734528f46afSFam Zheng     ram_block_add(new_block, &local_err);
1735ef701d7bSHu Tao     if (local_err) {
1736ef701d7bSHu Tao         g_free(new_block);
1737ef701d7bSHu Tao         error_propagate(errp, local_err);
1738528f46afSFam Zheng         return NULL;
1739ef701d7bSHu Tao     }
1740528f46afSFam Zheng     return new_block;
1741e1c57ab8SPaolo Bonzini }
1742e1c57ab8SPaolo Bonzini 
/* Allocate a fixed-size RAM block backed by caller-provided @host memory
 * (marked RAM_PREALLOC, so it is never freed by the RAM layer). */
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}
174862be4e3aSMichael S. Tsirkin 
1749528f46afSFam Zheng RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
17506977dfe6SYoshiaki Tamura {
175162be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
175262be4e3aSMichael S. Tsirkin }
175362be4e3aSMichael S. Tsirkin 
1754528f46afSFam Zheng RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
175562be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
175662be4e3aSMichael S. Tsirkin                                                      uint64_t length,
175762be4e3aSMichael S. Tsirkin                                                      void *host),
175862be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
175962be4e3aSMichael S. Tsirkin {
176062be4e3aSMichael S. Tsirkin     return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
17616977dfe6SYoshiaki Tamura }
17626977dfe6SYoshiaki Tamura 
/* RCU callback: release the host memory backing @block, then the block
 * itself.  The teardown mirrors how the memory was obtained:
 *  - RAM_PREALLOC: host buffer is owned by the caller, leave it alone;
 *  - Xen: release via the map cache;
 *  - fd-backed (POSIX only): munmap the reserved range and close the fd;
 *  - otherwise: anonymous RAM, free with qemu_anon_ram_free().
 */
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;                       /* host memory not owned by us */
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        /* Unmap the full reserved (max) length, not just used_length. */
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
177943771539SPaolo Bonzini 
/* Unlink @block from the global RAM list and schedule its memory for
 * release via call_rcu() once all current RCU readers are done.
 * Accepts NULL and ignores it.
 */
void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    /* The MRU cache may point at the removed block; drop it. */
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    /* Actual teardown is deferred to reclaim_ramblock() after a grace
     * period, so concurrent RCU readers never see freed memory.
     */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
1795e9a1ab19Sbellard 
#ifndef _WIN32
/* Re-create the host mapping for the guest RAM range
 * [addr, addr + length) at the same virtual address.
 * NOTE(review): apparently used to replace broken pages (e.g. after a
 * hardware memory error) with fresh ones -- confirm against callers.
 * Exits the process if the remap cannot be placed at the old address.
 */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;               /* caller-owned memory: nothing to remap */
            } else if (xen_enabled()) {
                /* Xen map-cache mappings cannot be remapped here. */
                abort();
            } else {
                /* MAP_FIXED: the replacement must land exactly on the
                 * old virtual address so existing pointers stay valid.
                 */
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Restore madvise-style hints lost by the fresh mapping. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
1844cd19cfa2SHuang Ying 
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * @ram_block may be NULL, in which case the block is looked up from
 * @addr (a global ram_addr_t); otherwise @addr is relative to the block.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        /* Resolve the block and make addr block-relative. */
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        /* Map (and cache) the whole block for subsequent accesses. */
        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
1874f471a17eSAlex Williamson 
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * On return *size is clamped so that [ptr, ptr + *size) stays inside
 * the block.  Returns NULL when *size is 0.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        /* Look the block up by global ram_addr and rebase addr. */
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    /* Never hand out a pointer range extending past the block. */
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
190838bee5dcSStefano Stabellini 
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        /* Under Xen host pointers come from the map cache, not from
         * directly-mapped blocks; translate through it instead.
         */
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    /* Fast path: most lookups hit the most-recently-used block. */
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case append when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
1971422148d3SDr. David Alan Gilbert 
1972e3dd7493SDr. David Alan Gilbert /*
1973e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
1974e3dd7493SDr. David Alan Gilbert  *
1975e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
1976e3dd7493SDr. David Alan Gilbert  *
1977e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
1978e3dd7493SDr. David Alan Gilbert  */
1979e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
1980e3dd7493SDr. David Alan Gilbert {
1981e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
1982e3dd7493SDr. David Alan Gilbert 
1983e3dd7493SDr. David Alan Gilbert     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1984e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
1985e3dd7493SDr. David Alan Gilbert             return block;
1986e3dd7493SDr. David Alan Gilbert         }
1987e3dd7493SDr. David Alan Gilbert     }
1988e3dd7493SDr. David Alan Gilbert 
1989e3dd7493SDr. David Alan Gilbert     return NULL;
1990e3dd7493SDr. David Alan Gilbert }
1991e3dd7493SDr. David Alan Gilbert 
1992422148d3SDr. David Alan Gilbert /* Some of the softmmu routines need to translate from a host pointer
1993422148d3SDr. David Alan Gilbert    (typically a TLB entry) back to a ram offset.  */
199407bdaa41SPaolo Bonzini ram_addr_t qemu_ram_addr_from_host(void *ptr)
1995422148d3SDr. David Alan Gilbert {
1996422148d3SDr. David Alan Gilbert     RAMBlock *block;
1997f615f396SPaolo Bonzini     ram_addr_t offset;
1998422148d3SDr. David Alan Gilbert 
1999f615f396SPaolo Bonzini     block = qemu_ram_block_from_host(ptr, false, &offset);
2000422148d3SDr. David Alan Gilbert     if (!block) {
200107bdaa41SPaolo Bonzini         return RAM_ADDR_INVALID;
2002422148d3SDr. David Alan Gilbert     }
2003422148d3SDr. David Alan Gilbert 
200407bdaa41SPaolo Bonzini     return block->offset + offset;
2005e890261fSMarcelo Tosatti }
2006f471a17eSAlex Williamson 
/* Called within RCU critical section.  */
/* Write handler for the "notdirty" region: pages whose dirty bits are
 * clear are routed here so the write can invalidate any translated
 * code on the page and set the dirty bits, before the page reverts to
 * fast direct RAM access.
 */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    /* If the page may still contain translated code, flush the TBs
     * covering the written range first.
     */
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    /* Perform the actual guest store into host RAM. */
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
20381ccde1cbSbellard 
/* Access filter for the notdirty region: only write accesses are
 * dispatched to it (reads have no dirty-tracking side effects).
 */
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}
2044b018ddf6SPaolo Bonzini 
/* Ops for io_mem_notdirty: write-only (see notdirty_mem_accepts),
 * all writes funneled through notdirty_mem_write.
 */
static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
20501ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.  */
/* @offset: offset of the access within the current page
 * @len:    length of the access in bytes
 * @attrs:  memory transaction attributes of the access
 * @flags:  BP_MEM_READ or BP_MEM_WRITE, matched against wp->flags
 *
 * Does not return normally when a watchpoint fires: exits the CPU loop
 * via cpu_loop_exit() / cpu_loop_exit_noexc().
 */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access. */
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                /* Give the per-CPU hook a chance to veto the hit. */
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: raise EXCP_DEBUG now. */
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Stop after the access: regenerate a single-insn
                     * TB so the access completes, then re-enter.
                     */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
21020f459d16Spbrook 
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    /* Resolve the address space matching the transaction attributes. */
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    /* May not return if a read watchpoint fires. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}
213066b9b43cSPeter Maydell 
/* Write-side counterpart of watch_mem_read: check write watchpoints,
 * then forward the store to the underlying address space.
 */
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    /* Resolve the address space matching the transaction attributes. */
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    /* May not return if a write watchpoint fires. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}
21546658ffb8Spbrook 
21551ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
215666b9b43cSPeter Maydell     .read_with_attrs = watch_mem_read,
215766b9b43cSPeter Maydell     .write_with_attrs = watch_mem_write,
21581ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
21596658ffb8Spbrook };
21606658ffb8Spbrook 
/* Read handler for a subpage region: forward the access to the parent
 * address space at (subpage->base + addr) through a bounce buffer,
 * then decode the bytes into *data at the requested width.
 */
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];             /* large enough for max access size (8) */
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
2194db7b5426Sblueswir1 
/* Write handler for a subpage region: encode @value at the requested
 * width into a bounce buffer, then forward it to the parent address
 * space at (subpage->base + addr).
 */
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];             /* large enough for max access size (8) */

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
2225db7b5426Sblueswir1 
/* Access filter for subpage regions: an access is valid iff the same
 * access would be valid on the parent address space at the translated
 * address (subpage->base + addr).
 */
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}
2238c353e4ccSPaolo Bonzini 
/* Ops for subpage regions: 1- to 8-byte accesses, validity delegated
 * to the parent address space via subpage_accepts.
 */
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2249db7b5426Sblueswir1 
2250c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
22515312bd8bSAvi Kivity                              uint16_t section)
2252db7b5426Sblueswir1 {
2253db7b5426Sblueswir1     int idx, eidx;
2254db7b5426Sblueswir1 
2255db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2256db7b5426Sblueswir1         return -1;
2257db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2258db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2259db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2260016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2261016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2262db7b5426Sblueswir1 #endif
2263db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
22645312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2265db7b5426Sblueswir1     }
2266db7b5426Sblueswir1 
2267db7b5426Sblueswir1     return 0;
2268db7b5426Sblueswir1 }
2269db7b5426Sblueswir1 
/* Allocate and initialize a subpage container for the page starting at
 * @base in address space @as.  Every sub-section initially points at
 * PHYS_SECTION_UNASSIGNED.  Returns the new subpage (never NULL;
 * g_malloc0 aborts on OOM).
 */
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    /* Mark the region so the dispatch code can tell it is a subpage. */
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
2289db7b5426Sblueswir1 
/* Register a MemoryRegionSection covering the whole 2^64 range for @mr
 * in @map and return its section index.  Used by mem_begin() to create
 * the fixed PHYS_SECTION_* entries.
 */
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
23045312bd8bSAvi Kivity 
/* Translate an iotlb value back to the MemoryRegion it refers to.
 * The sub-page bits of @index select the section in the dispatch table
 * of the CPU address space chosen by @attrs.
 */
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    /* RCU-protected: the dispatch table may be swapped concurrently. */
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
2314aa102231SAvi Kivity 
2315e9179ce1SAvi Kivity static void io_mem_init(void)
2316e9179ce1SAvi Kivity {
23171f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
23182c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
23191f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
23202c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
23211f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
23222c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
23231f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2324e9179ce1SAvi Kivity }
2325e9179ce1SAvi Kivity 
2326ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
2327ac1970fbSAvi Kivity {
232889ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
232953cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
233053cb28cbSMarcel Apfelbaum     uint16_t n;
233153cb28cbSMarcel Apfelbaum 
2332a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
233353cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
2334a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
233553cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
2336a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
233753cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
2338a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
233953cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
234000752703SPaolo Bonzini 
23419736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
234200752703SPaolo Bonzini     d->as = as;
234300752703SPaolo Bonzini     as->next_dispatch = d;
234400752703SPaolo Bonzini }
234500752703SPaolo Bonzini 
234679e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d)
234779e2b9aeSPaolo Bonzini {
234879e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
234979e2b9aeSPaolo Bonzini     g_free(d);
235079e2b9aeSPaolo Bonzini }
235179e2b9aeSPaolo Bonzini 
235200752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
235300752703SPaolo Bonzini {
235400752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
23550475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
23560475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
2357ac1970fbSAvi Kivity 
235853cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
2359b35ba30fSMichael S. Tsirkin 
236079e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, next);
236153cb28cbSMarcel Apfelbaum     if (cur) {
236279e2b9aeSPaolo Bonzini         call_rcu(cur, address_space_dispatch_free, rcu);
2363ac1970fbSAvi Kivity     }
23649affd6fcSPaolo Bonzini }
23659affd6fcSPaolo Bonzini 
23661d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
236750c1e149SAvi Kivity {
236832857f4dSPeter Maydell     CPUAddressSpace *cpuas;
236932857f4dSPeter Maydell     AddressSpaceDispatch *d;
2370117712c3SAvi Kivity 
2371117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
2372117712c3SAvi Kivity        reset the modified entries */
237332857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
237432857f4dSPeter Maydell     cpu_reloading_memory_map();
237532857f4dSPeter Maydell     /* The CPU and TLB are protected by the iothread lock.
237632857f4dSPeter Maydell      * We reload the dispatch pointer now because cpu_reloading_memory_map()
237732857f4dSPeter Maydell      * may have split the RCU critical section.
237832857f4dSPeter Maydell      */
237932857f4dSPeter Maydell     d = atomic_rcu_read(&cpuas->as->dispatch);
238032857f4dSPeter Maydell     cpuas->memory_dispatch = d;
238132857f4dSPeter Maydell     tlb_flush(cpuas->cpu, 1);
238250c1e149SAvi Kivity }
238350c1e149SAvi Kivity 
2384ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
2385ac1970fbSAvi Kivity {
238600752703SPaolo Bonzini     as->dispatch = NULL;
238789ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
2388ac1970fbSAvi Kivity         .begin = mem_begin,
238900752703SPaolo Bonzini         .commit = mem_commit,
2390ac1970fbSAvi Kivity         .region_add = mem_add,
2391ac1970fbSAvi Kivity         .region_nop = mem_add,
2392ac1970fbSAvi Kivity         .priority = 0,
2393ac1970fbSAvi Kivity     };
239489ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
2395ac1970fbSAvi Kivity }
2396ac1970fbSAvi Kivity 
23976e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as)
23986e48e8f9SPaolo Bonzini {
23996e48e8f9SPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
24006e48e8f9SPaolo Bonzini }
24016e48e8f9SPaolo Bonzini 
240283f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
240383f3c251SAvi Kivity {
240483f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
240583f3c251SAvi Kivity 
240679e2b9aeSPaolo Bonzini     atomic_rcu_set(&as->dispatch, NULL);
240779e2b9aeSPaolo Bonzini     if (d) {
240879e2b9aeSPaolo Bonzini         call_rcu(d, address_space_dispatch_free, rcu);
240979e2b9aeSPaolo Bonzini     }
241083f3c251SAvi Kivity }
241183f3c251SAvi Kivity 
241262152b8aSAvi Kivity static void memory_map_init(void)
241362152b8aSAvi Kivity {
24147267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
241503f49957SPaolo Bonzini 
241657271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
24177dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2418309cb471SAvi Kivity 
24197267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
24203bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
24213bb28b72SJan Kiszka                           65536);
24227dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
24232641689aSliguang }
242462152b8aSAvi Kivity 
242562152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
242662152b8aSAvi Kivity {
242762152b8aSAvi Kivity     return system_memory;
242862152b8aSAvi Kivity }
242962152b8aSAvi Kivity 
2430309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2431309cb471SAvi Kivity {
2432309cb471SAvi Kivity     return system_io;
2433309cb471SAvi Kivity }
2434309cb471SAvi Kivity 
2435e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2436e2eef170Spbrook 
243713eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
243813eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2439f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2440a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
244113eb76e0Sbellard {
244213eb76e0Sbellard     int l, flags;
244313eb76e0Sbellard     target_ulong page;
244453a5960aSpbrook     void * p;
244513eb76e0Sbellard 
244613eb76e0Sbellard     while (len > 0) {
244713eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
244813eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
244913eb76e0Sbellard         if (l > len)
245013eb76e0Sbellard             l = len;
245113eb76e0Sbellard         flags = page_get_flags(page);
245213eb76e0Sbellard         if (!(flags & PAGE_VALID))
2453a68fe89cSPaul Brook             return -1;
245413eb76e0Sbellard         if (is_write) {
245513eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2456a68fe89cSPaul Brook                 return -1;
2457579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
245872fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2459a68fe89cSPaul Brook                 return -1;
246072fb7daaSaurel32             memcpy(p, buf, l);
246172fb7daaSaurel32             unlock_user(p, addr, l);
246213eb76e0Sbellard         } else {
246313eb76e0Sbellard             if (!(flags & PAGE_READ))
2464a68fe89cSPaul Brook                 return -1;
2465579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
246672fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2467a68fe89cSPaul Brook                 return -1;
246872fb7daaSaurel32             memcpy(buf, p, l);
24695b257578Saurel32             unlock_user(p, addr, 0);
247013eb76e0Sbellard         }
247113eb76e0Sbellard         len -= l;
247213eb76e0Sbellard         buf += l;
247313eb76e0Sbellard         addr += l;
247413eb76e0Sbellard     }
2475a68fe89cSPaul Brook     return 0;
247613eb76e0Sbellard }
24778df1cd07Sbellard 
247813eb76e0Sbellard #else
247951d7a9ebSAnthony PERARD 
2480845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2481a8170e5eSAvi Kivity                                      hwaddr length)
248251d7a9ebSAnthony PERARD {
2483845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
24840878d0e1SPaolo Bonzini     addr += memory_region_get_ram_addr(mr);
24850878d0e1SPaolo Bonzini 
2486e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2487e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2488e87f7778SPaolo Bonzini      * xen_modified_memory.
2489e87f7778SPaolo Bonzini      */
2490e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2491e87f7778SPaolo Bonzini         dirty_log_mask =
2492e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2493e87f7778SPaolo Bonzini     }
2494845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
249535865339SPaolo Bonzini         tb_invalidate_phys_range(addr, addr + length);
2496845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2497845b6214SPaolo Bonzini     }
249858d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
249949dfcec4SPaolo Bonzini }
250051d7a9ebSAnthony PERARD 
250123326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
250282f2563fSPaolo Bonzini {
2503e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
250423326164SRichard Henderson 
250523326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
250623326164SRichard Henderson        otherwise specified.  */
250723326164SRichard Henderson     if (access_size_max == 0) {
250823326164SRichard Henderson         access_size_max = 4;
250982f2563fSPaolo Bonzini     }
251023326164SRichard Henderson 
251123326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
251223326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
251323326164SRichard Henderson         unsigned align_size_max = addr & -addr;
251423326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
251523326164SRichard Henderson             access_size_max = align_size_max;
251623326164SRichard Henderson         }
251723326164SRichard Henderson     }
251823326164SRichard Henderson 
251923326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
252023326164SRichard Henderson     if (l > access_size_max) {
252123326164SRichard Henderson         l = access_size_max;
252223326164SRichard Henderson     }
25236554f5c0SPeter Maydell     l = pow2floor(l);
252423326164SRichard Henderson 
252523326164SRichard Henderson     return l;
252682f2563fSPaolo Bonzini }
252782f2563fSPaolo Bonzini 
25284840f10eSJan Kiszka static bool prepare_mmio_access(MemoryRegion *mr)
2529125b3806SPaolo Bonzini {
25304840f10eSJan Kiszka     bool unlocked = !qemu_mutex_iothread_locked();
25314840f10eSJan Kiszka     bool release_lock = false;
25324840f10eSJan Kiszka 
25334840f10eSJan Kiszka     if (unlocked && mr->global_locking) {
25344840f10eSJan Kiszka         qemu_mutex_lock_iothread();
25354840f10eSJan Kiszka         unlocked = false;
25364840f10eSJan Kiszka         release_lock = true;
2537125b3806SPaolo Bonzini     }
25384840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
25394840f10eSJan Kiszka         if (unlocked) {
25404840f10eSJan Kiszka             qemu_mutex_lock_iothread();
25414840f10eSJan Kiszka         }
25424840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
25434840f10eSJan Kiszka         if (unlocked) {
25444840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
25454840f10eSJan Kiszka         }
25464840f10eSJan Kiszka     }
25474840f10eSJan Kiszka 
25484840f10eSJan Kiszka     return release_lock;
2549125b3806SPaolo Bonzini }
2550125b3806SPaolo Bonzini 
2551a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2552a203ac70SPaolo Bonzini static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2553a203ac70SPaolo Bonzini                                                 MemTxAttrs attrs,
2554a203ac70SPaolo Bonzini                                                 const uint8_t *buf,
2555a203ac70SPaolo Bonzini                                                 int len, hwaddr addr1,
2556a203ac70SPaolo Bonzini                                                 hwaddr l, MemoryRegion *mr)
255713eb76e0Sbellard {
255813eb76e0Sbellard     uint8_t *ptr;
2559791af8c8SPaolo Bonzini     uint64_t val;
25603b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
25614840f10eSJan Kiszka     bool release_lock = false;
256213eb76e0Sbellard 
2563a203ac70SPaolo Bonzini     for (;;) {
2564eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, true)) {
25654840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
25665c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
25674917cf44SAndreas Färber             /* XXX: could force current_cpu to NULL to avoid
25686a00d601Sbellard                potential bugs */
256923326164SRichard Henderson             switch (l) {
257023326164SRichard Henderson             case 8:
257123326164SRichard Henderson                 /* 64 bit write access */
257223326164SRichard Henderson                 val = ldq_p(buf);
25733b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 8,
25743b643495SPeter Maydell                                                        attrs);
257523326164SRichard Henderson                 break;
257623326164SRichard Henderson             case 4:
25771c213d19Sbellard                 /* 32 bit write access */
2578c27004ecSbellard                 val = ldl_p(buf);
25793b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 4,
25803b643495SPeter Maydell                                                        attrs);
258123326164SRichard Henderson                 break;
258223326164SRichard Henderson             case 2:
25831c213d19Sbellard                 /* 16 bit write access */
2584c27004ecSbellard                 val = lduw_p(buf);
25853b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 2,
25863b643495SPeter Maydell                                                        attrs);
258723326164SRichard Henderson                 break;
258823326164SRichard Henderson             case 1:
25891c213d19Sbellard                 /* 8 bit write access */
2590c27004ecSbellard                 val = ldub_p(buf);
25913b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 1,
25923b643495SPeter Maydell                                                        attrs);
259323326164SRichard Henderson                 break;
259423326164SRichard Henderson             default:
259523326164SRichard Henderson                 abort();
259613eb76e0Sbellard             }
25972bbfa05dSPaolo Bonzini         } else {
259813eb76e0Sbellard             /* RAM case */
25990878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
260013eb76e0Sbellard             memcpy(ptr, buf, l);
2601845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, l);
26023a7d929eSbellard         }
2603eb7eeb88SPaolo Bonzini 
2604eb7eeb88SPaolo Bonzini         if (release_lock) {
2605eb7eeb88SPaolo Bonzini             qemu_mutex_unlock_iothread();
2606eb7eeb88SPaolo Bonzini             release_lock = false;
2607eb7eeb88SPaolo Bonzini         }
2608eb7eeb88SPaolo Bonzini 
2609eb7eeb88SPaolo Bonzini         len -= l;
2610eb7eeb88SPaolo Bonzini         buf += l;
2611eb7eeb88SPaolo Bonzini         addr += l;
2612a203ac70SPaolo Bonzini 
2613a203ac70SPaolo Bonzini         if (!len) {
2614a203ac70SPaolo Bonzini             break;
2615eb7eeb88SPaolo Bonzini         }
2616a203ac70SPaolo Bonzini 
2617a203ac70SPaolo Bonzini         l = len;
2618a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2619a203ac70SPaolo Bonzini     }
2620eb7eeb88SPaolo Bonzini 
2621eb7eeb88SPaolo Bonzini     return result;
2622eb7eeb88SPaolo Bonzini }
2623eb7eeb88SPaolo Bonzini 
2624a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2625a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2626eb7eeb88SPaolo Bonzini {
2627eb7eeb88SPaolo Bonzini     hwaddr l;
2628eb7eeb88SPaolo Bonzini     hwaddr addr1;
2629eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2630eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2631a203ac70SPaolo Bonzini 
2632a203ac70SPaolo Bonzini     if (len > 0) {
2633a203ac70SPaolo Bonzini         rcu_read_lock();
2634a203ac70SPaolo Bonzini         l = len;
2635a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2636a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2637a203ac70SPaolo Bonzini                                               addr1, l, mr);
2638a203ac70SPaolo Bonzini         rcu_read_unlock();
2639a203ac70SPaolo Bonzini     }
2640a203ac70SPaolo Bonzini 
2641a203ac70SPaolo Bonzini     return result;
2642a203ac70SPaolo Bonzini }
2643a203ac70SPaolo Bonzini 
2644a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
2645a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2646a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
2647a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
2648a203ac70SPaolo Bonzini                                         MemoryRegion *mr)
2649a203ac70SPaolo Bonzini {
2650a203ac70SPaolo Bonzini     uint8_t *ptr;
2651a203ac70SPaolo Bonzini     uint64_t val;
2652a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2653eb7eeb88SPaolo Bonzini     bool release_lock = false;
2654eb7eeb88SPaolo Bonzini 
2655a203ac70SPaolo Bonzini     for (;;) {
2656eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, false)) {
265713eb76e0Sbellard             /* I/O case */
26584840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
26595c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
266023326164SRichard Henderson             switch (l) {
266123326164SRichard Henderson             case 8:
266223326164SRichard Henderson                 /* 64 bit read access */
26633b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
26643b643495SPeter Maydell                                                       attrs);
266523326164SRichard Henderson                 stq_p(buf, val);
266623326164SRichard Henderson                 break;
266723326164SRichard Henderson             case 4:
266813eb76e0Sbellard                 /* 32 bit read access */
26693b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
26703b643495SPeter Maydell                                                       attrs);
2671c27004ecSbellard                 stl_p(buf, val);
267223326164SRichard Henderson                 break;
267323326164SRichard Henderson             case 2:
267413eb76e0Sbellard                 /* 16 bit read access */
26753b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
26763b643495SPeter Maydell                                                       attrs);
2677c27004ecSbellard                 stw_p(buf, val);
267823326164SRichard Henderson                 break;
267923326164SRichard Henderson             case 1:
26801c213d19Sbellard                 /* 8 bit read access */
26813b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
26823b643495SPeter Maydell                                                       attrs);
2683c27004ecSbellard                 stb_p(buf, val);
268423326164SRichard Henderson                 break;
268523326164SRichard Henderson             default:
268623326164SRichard Henderson                 abort();
268713eb76e0Sbellard             }
268813eb76e0Sbellard         } else {
268913eb76e0Sbellard             /* RAM case */
26900878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2691f3705d53SAvi Kivity             memcpy(buf, ptr, l);
269213eb76e0Sbellard         }
26934840f10eSJan Kiszka 
26944840f10eSJan Kiszka         if (release_lock) {
26954840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
26964840f10eSJan Kiszka             release_lock = false;
26974840f10eSJan Kiszka         }
26984840f10eSJan Kiszka 
269913eb76e0Sbellard         len -= l;
270013eb76e0Sbellard         buf += l;
270113eb76e0Sbellard         addr += l;
2702a203ac70SPaolo Bonzini 
2703a203ac70SPaolo Bonzini         if (!len) {
2704a203ac70SPaolo Bonzini             break;
270513eb76e0Sbellard         }
2706a203ac70SPaolo Bonzini 
2707a203ac70SPaolo Bonzini         l = len;
2708a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2709a203ac70SPaolo Bonzini     }
2710a203ac70SPaolo Bonzini 
2711a203ac70SPaolo Bonzini     return result;
2712a203ac70SPaolo Bonzini }
2713a203ac70SPaolo Bonzini 
27143cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
27153cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2716a203ac70SPaolo Bonzini {
2717a203ac70SPaolo Bonzini     hwaddr l;
2718a203ac70SPaolo Bonzini     hwaddr addr1;
2719a203ac70SPaolo Bonzini     MemoryRegion *mr;
2720a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2721a203ac70SPaolo Bonzini 
2722a203ac70SPaolo Bonzini     if (len > 0) {
2723a203ac70SPaolo Bonzini         rcu_read_lock();
2724a203ac70SPaolo Bonzini         l = len;
2725a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2726a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2727a203ac70SPaolo Bonzini                                              addr1, l, mr);
272841063e1eSPaolo Bonzini         rcu_read_unlock();
2729a203ac70SPaolo Bonzini     }
2730fd8aaa76SPaolo Bonzini 
27313b643495SPeter Maydell     return result;
273213eb76e0Sbellard }
27338df1cd07Sbellard 
2734eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2735eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2736ac1970fbSAvi Kivity {
2737eb7eeb88SPaolo Bonzini     if (is_write) {
2738eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2739eb7eeb88SPaolo Bonzini     } else {
2740eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2741ac1970fbSAvi Kivity     }
2742ac1970fbSAvi Kivity }
2743ac1970fbSAvi Kivity 
2744a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2745ac1970fbSAvi Kivity                             int len, int is_write)
2746ac1970fbSAvi Kivity {
27475c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
27485c9eb028SPeter Maydell                      buf, len, is_write);
2749ac1970fbSAvi Kivity }
2750ac1970fbSAvi Kivity 
2751582b55a9SAlexander Graf enum write_rom_type {
2752582b55a9SAlexander Graf     WRITE_DATA,
2753582b55a9SAlexander Graf     FLUSH_CACHE,
2754582b55a9SAlexander Graf };
2755582b55a9SAlexander Graf 
27562a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2757582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2758d0ecd2aaSbellard {
2759149f54b5SPaolo Bonzini     hwaddr l;
2760d0ecd2aaSbellard     uint8_t *ptr;
2761149f54b5SPaolo Bonzini     hwaddr addr1;
27625c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2763d0ecd2aaSbellard 
276441063e1eSPaolo Bonzini     rcu_read_lock();
2765d0ecd2aaSbellard     while (len > 0) {
2766d0ecd2aaSbellard         l = len;
27672a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2768d0ecd2aaSbellard 
27695c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
27705c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2771b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
2772d0ecd2aaSbellard         } else {
2773d0ecd2aaSbellard             /* ROM/RAM case */
27740878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2775582b55a9SAlexander Graf             switch (type) {
2776582b55a9SAlexander Graf             case WRITE_DATA:
2777d0ecd2aaSbellard                 memcpy(ptr, buf, l);
2778845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
2779582b55a9SAlexander Graf                 break;
2780582b55a9SAlexander Graf             case FLUSH_CACHE:
2781582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2782582b55a9SAlexander Graf                 break;
2783582b55a9SAlexander Graf             }
2784d0ecd2aaSbellard         }
2785d0ecd2aaSbellard         len -= l;
2786d0ecd2aaSbellard         buf += l;
2787d0ecd2aaSbellard         addr += l;
2788d0ecd2aaSbellard     }
278941063e1eSPaolo Bonzini     rcu_read_unlock();
2790d0ecd2aaSbellard }
2791d0ecd2aaSbellard 
2792582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
27932a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2794582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2795582b55a9SAlexander Graf {
27962a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2797582b55a9SAlexander Graf }
2798582b55a9SAlexander Graf 
2799582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2800582b55a9SAlexander Graf {
2801582b55a9SAlexander Graf     /*
2802582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2803582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2804582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2805582b55a9SAlexander Graf      * the host's instruction cache at least.
2806582b55a9SAlexander Graf      */
2807582b55a9SAlexander Graf     if (tcg_enabled()) {
2808582b55a9SAlexander Graf         return;
2809582b55a9SAlexander Graf     }
2810582b55a9SAlexander Graf 
28112a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
28122a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2813582b55a9SAlexander Graf }
2814582b55a9SAlexander Graf 
28156d16c2f8Saliguori typedef struct {
2816d3e71559SPaolo Bonzini     MemoryRegion *mr;
28176d16c2f8Saliguori     void *buffer;
2818a8170e5eSAvi Kivity     hwaddr addr;
2819a8170e5eSAvi Kivity     hwaddr len;
2820c2cba0ffSFam Zheng     bool in_use;
28216d16c2f8Saliguori } BounceBuffer;
28226d16c2f8Saliguori 
28236d16c2f8Saliguori static BounceBuffer bounce;
28246d16c2f8Saliguori 
2825ba223c29Saliguori typedef struct MapClient {
2826e95205e1SFam Zheng     QEMUBH *bh;
282772cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2828ba223c29Saliguori } MapClient;
2829ba223c29Saliguori 
283038e047b5SFam Zheng QemuMutex map_client_list_lock;
283172cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
283272cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2833ba223c29Saliguori 
2834e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2835ba223c29Saliguori {
283672cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
28377267c094SAnthony Liguori     g_free(client);
2838ba223c29Saliguori }
2839ba223c29Saliguori 
284033b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2841ba223c29Saliguori {
2842ba223c29Saliguori     MapClient *client;
2843ba223c29Saliguori 
284472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
284572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2846e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2847e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2848ba223c29Saliguori     }
2849ba223c29Saliguori }
2850ba223c29Saliguori 
2851e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2852d0ecd2aaSbellard {
2853d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2854d0ecd2aaSbellard 
285538e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2856e95205e1SFam Zheng     client->bh = bh;
2857d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
285833b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
285933b6c2edSFam Zheng         cpu_notify_map_clients_locked();
286033b6c2edSFam Zheng     }
286138e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2862d0ecd2aaSbellard }
2863d0ecd2aaSbellard 
286438e047b5SFam Zheng void cpu_exec_init_all(void)
286538e047b5SFam Zheng {
286638e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
286738e047b5SFam Zheng     io_mem_init();
2868680a4783SPaolo Bonzini     memory_map_init();
286938e047b5SFam Zheng     qemu_mutex_init(&map_client_list_lock);
287038e047b5SFam Zheng }
287138e047b5SFam Zheng 
2872e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh)
2873d0ecd2aaSbellard {
2874e95205e1SFam Zheng     MapClient *client;
2875d0ecd2aaSbellard 
2876e95205e1SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2877e95205e1SFam Zheng     QLIST_FOREACH(client, &map_client_list, link) {
2878e95205e1SFam Zheng         if (client->bh == bh) {
2879e95205e1SFam Zheng             cpu_unregister_map_client_do(client);
2880e95205e1SFam Zheng             break;
2881e95205e1SFam Zheng         }
2882e95205e1SFam Zheng     }
2883e95205e1SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2884d0ecd2aaSbellard }
2885d0ecd2aaSbellard 
2886d0ecd2aaSbellard static void cpu_notify_map_clients(void)
2887d0ecd2aaSbellard {
288838e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
288933b6c2edSFam Zheng     cpu_notify_map_clients_locked();
289038e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
28916d16c2f8Saliguori }
28926d16c2f8Saliguori 
289351644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
289451644ab7SPaolo Bonzini {
28955c8a00ceSPaolo Bonzini     MemoryRegion *mr;
289651644ab7SPaolo Bonzini     hwaddr l, xlat;
289751644ab7SPaolo Bonzini 
289841063e1eSPaolo Bonzini     rcu_read_lock();
289951644ab7SPaolo Bonzini     while (len > 0) {
290051644ab7SPaolo Bonzini         l = len;
29015c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
29025c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
29035c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
29045c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
290551644ab7SPaolo Bonzini                 return false;
290651644ab7SPaolo Bonzini             }
290751644ab7SPaolo Bonzini         }
290851644ab7SPaolo Bonzini 
290951644ab7SPaolo Bonzini         len -= l;
291051644ab7SPaolo Bonzini         addr += l;
291151644ab7SPaolo Bonzini     }
291241063e1eSPaolo Bonzini     rcu_read_unlock();
291351644ab7SPaolo Bonzini     return true;
291451644ab7SPaolo Bonzini }
291551644ab7SPaolo Bonzini 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        /* Non-direct (e.g. MMIO) region: fall back to the single global
         * bounce buffer.  Only one mapping may use it at a time; a racing
         * caller observes in_use already true and gets NULL (retry later,
         * see cpu_register_map_client()). */
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        /* Hold a reference so the region survives until
         * address_space_unmap() releases it. */
        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Read mapping: pre-fill the bounce buffer for the caller. */
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    /* Direct RAM: grow the mapping over successive translations as long
     * as they stay in the same MemoryRegion and remain contiguous
     * (xlat == base + done). */
    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    /* Reference dropped by address_space_unmap(). */
    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
29896d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct mapping: @buffer points straight into guest RAM. */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            /* Mark written range dirty and invalidate stale TBs. */
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        /* Drop the reference taken by address_space_map(). */
        memory_region_unref(mr);
        return;
    }
    /* Bounce-buffer path: flush the caller's writes back to the target. */
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    /* Release in_use with a full barrier so the frees above are visible
     * before any waiter retries address_space_map(). */
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
3022d0ecd2aaSbellard 
3023a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
3024a8170e5eSAvi Kivity                               hwaddr *plen,
3025ac1970fbSAvi Kivity                               int is_write)
3026ac1970fbSAvi Kivity {
3027ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
3028ac1970fbSAvi Kivity }
3029ac1970fbSAvi Kivity 
3030a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3031a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
3032ac1970fbSAvi Kivity {
3033ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3034ac1970fbSAvi Kivity }
3035ac1970fbSAvi Kivity 
/* warning: addr must be aligned */
/* Common 32-bit load helper: translate @addr in @as and read a word in
 * the byte order given by @endian, via MMIO dispatch or directly from
 * RAM.  Transaction status is stored in *result when non-NULL. */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;          /* require a full 32-bit span */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* May take the iothread lock; remember to drop it below. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
        /* Dispatch returns host/native order; swap if the requested
         * endianness differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
30918df1cd07Sbellard 
309250013115SPeter Maydell uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
309350013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
309450013115SPeter Maydell {
309550013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
309650013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
309750013115SPeter Maydell }
309850013115SPeter Maydell 
309950013115SPeter Maydell uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
310050013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
310150013115SPeter Maydell {
310250013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
310350013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
310450013115SPeter Maydell }
310550013115SPeter Maydell 
310650013115SPeter Maydell uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
310750013115SPeter Maydell                               MemTxAttrs attrs, MemTxResult *result)
310850013115SPeter Maydell {
310950013115SPeter Maydell     return address_space_ldl_internal(as, addr, attrs, result,
311050013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
311150013115SPeter Maydell }
311250013115SPeter Maydell 
3113fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
31141e78bcc1SAlexander Graf {
311550013115SPeter Maydell     return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31161e78bcc1SAlexander Graf }
31171e78bcc1SAlexander Graf 
3118fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
31191e78bcc1SAlexander Graf {
312050013115SPeter Maydell     return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31211e78bcc1SAlexander Graf }
31221e78bcc1SAlexander Graf 
3123fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
31241e78bcc1SAlexander Graf {
312550013115SPeter Maydell     return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
31261e78bcc1SAlexander Graf }
31271e78bcc1SAlexander Graf 
/* warning: addr must be aligned */
/* Common 64-bit load helper: translate @addr in @as and read a quadword
 * in the byte order given by @endian, via MMIO dispatch or directly from
 * RAM.  Transaction status is stored in *result when non-NULL. */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;          /* require a full 64-bit span */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* May take the iothread lock; remember to drop it below. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
        /* Swap if the requested endianness differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
318484b7b8e7Sbellard 
318550013115SPeter Maydell uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
318650013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
318750013115SPeter Maydell {
318850013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
318950013115SPeter Maydell                                       DEVICE_NATIVE_ENDIAN);
319050013115SPeter Maydell }
319150013115SPeter Maydell 
319250013115SPeter Maydell uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
319350013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
319450013115SPeter Maydell {
319550013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
319650013115SPeter Maydell                                       DEVICE_LITTLE_ENDIAN);
319750013115SPeter Maydell }
319850013115SPeter Maydell 
319950013115SPeter Maydell uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
320050013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
320150013115SPeter Maydell {
320250013115SPeter Maydell     return address_space_ldq_internal(as, addr, attrs, result,
320350013115SPeter Maydell                                       DEVICE_BIG_ENDIAN);
320450013115SPeter Maydell }
320550013115SPeter Maydell 
32062c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
32071e78bcc1SAlexander Graf {
320850013115SPeter Maydell     return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32091e78bcc1SAlexander Graf }
32101e78bcc1SAlexander Graf 
32112c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
32121e78bcc1SAlexander Graf {
321350013115SPeter Maydell     return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32141e78bcc1SAlexander Graf }
32151e78bcc1SAlexander Graf 
32162c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
32171e78bcc1SAlexander Graf {
321850013115SPeter Maydell     return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
32191e78bcc1SAlexander Graf }
32201e78bcc1SAlexander Graf 
3221aab33094Sbellard /* XXX: optimize */
322250013115SPeter Maydell uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
322350013115SPeter Maydell                             MemTxAttrs attrs, MemTxResult *result)
3224aab33094Sbellard {
3225aab33094Sbellard     uint8_t val;
322650013115SPeter Maydell     MemTxResult r;
322750013115SPeter Maydell 
322850013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &val, 1, 0);
322950013115SPeter Maydell     if (result) {
323050013115SPeter Maydell         *result = r;
323150013115SPeter Maydell     }
3232aab33094Sbellard     return val;
3233aab33094Sbellard }
3234aab33094Sbellard 
323550013115SPeter Maydell uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
323650013115SPeter Maydell {
323750013115SPeter Maydell     return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
323850013115SPeter Maydell }
323950013115SPeter Maydell 
/* warning: addr must be aligned */
/* Common 16-bit load helper: translate @addr in @as and read a halfword
 * in the byte order given by @endian, via MMIO dispatch or directly from
 * RAM.  Transaction status is stored in *result when non-NULL. */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;          /* require a full 16-bit span */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* May take the iothread lock; remember to drop it below. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
        /* Swap if the requested endianness differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3297aab33094Sbellard 
329850013115SPeter Maydell uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
329950013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
330050013115SPeter Maydell {
330150013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
330250013115SPeter Maydell                                        DEVICE_NATIVE_ENDIAN);
330350013115SPeter Maydell }
330450013115SPeter Maydell 
330550013115SPeter Maydell uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
330650013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
330750013115SPeter Maydell {
330850013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
330950013115SPeter Maydell                                        DEVICE_LITTLE_ENDIAN);
331050013115SPeter Maydell }
331150013115SPeter Maydell 
331250013115SPeter Maydell uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
331350013115SPeter Maydell                            MemTxAttrs attrs, MemTxResult *result)
331450013115SPeter Maydell {
331550013115SPeter Maydell     return address_space_lduw_internal(as, addr, attrs, result,
331650013115SPeter Maydell                                        DEVICE_BIG_ENDIAN);
331750013115SPeter Maydell }
331850013115SPeter Maydell 
331941701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
33201e78bcc1SAlexander Graf {
332150013115SPeter Maydell     return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
33221e78bcc1SAlexander Graf }
33231e78bcc1SAlexander Graf 
332441701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
33251e78bcc1SAlexander Graf {
332650013115SPeter Maydell     return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
33271e78bcc1SAlexander Graf }
33281e78bcc1SAlexander Graf 
332941701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
33301e78bcc1SAlexander Graf {
333150013115SPeter Maydell     return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
33321e78bcc1SAlexander Graf }
33331e78bcc1SAlexander Graf 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;          /* require a full 32-bit span */
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* May take the iothread lock; remember to drop it below. */
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        /* Record the write in all dirty bitmaps EXCEPT the code one,
         * so translated blocks over this page are not invalidated. */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
33738df1cd07Sbellard 
337450013115SPeter Maydell void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
337550013115SPeter Maydell {
337650013115SPeter Maydell     address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
337750013115SPeter Maydell }
337850013115SPeter Maydell 
/* warning: addr must be aligned */
/* Common 32-bit store helper: translate @addr in @as and write @val in
 * the byte order given by @endian, via MMIO dispatch or directly to RAM
 * (marking the range dirty).  Status is stored in *result when non-NULL. */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;          /* require a full 32-bit span */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* May take the iothread lock; remember to drop it below. */
        release_lock |= prepare_mmio_access(mr);

        /* Dispatch expects native order; swap first if needed. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        /* Mark written range dirty and invalidate stale TBs. */
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
343450013115SPeter Maydell 
343550013115SPeter Maydell void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
343650013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
343750013115SPeter Maydell {
343850013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
343950013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
344050013115SPeter Maydell }
344150013115SPeter Maydell 
344250013115SPeter Maydell void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
344350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
344450013115SPeter Maydell {
344550013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
344650013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
344750013115SPeter Maydell }
344850013115SPeter Maydell 
344950013115SPeter Maydell void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
345050013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
345150013115SPeter Maydell {
345250013115SPeter Maydell     address_space_stl_internal(as, addr, val, attrs, result,
345350013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
34543a7d929eSbellard }
34558df1cd07Sbellard 
3456ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34571e78bcc1SAlexander Graf {
345850013115SPeter Maydell     address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34591e78bcc1SAlexander Graf }
34601e78bcc1SAlexander Graf 
3461ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34621e78bcc1SAlexander Graf {
346350013115SPeter Maydell     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34641e78bcc1SAlexander Graf }
34651e78bcc1SAlexander Graf 
3466ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
34671e78bcc1SAlexander Graf {
346850013115SPeter Maydell     address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
34691e78bcc1SAlexander Graf }
34701e78bcc1SAlexander Graf 
3471aab33094Sbellard /* XXX: optimize */
347250013115SPeter Maydell void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
347350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
3474aab33094Sbellard {
3475aab33094Sbellard     uint8_t v = val;
347650013115SPeter Maydell     MemTxResult r;
347750013115SPeter Maydell 
347850013115SPeter Maydell     r = address_space_rw(as, addr, attrs, &v, 1, 1);
347950013115SPeter Maydell     if (result) {
348050013115SPeter Maydell         *result = r;
348150013115SPeter Maydell     }
348250013115SPeter Maydell }
348350013115SPeter Maydell 
348450013115SPeter Maydell /* Store a single byte at @addr in @as; transaction failures are
                            * ignored (no MemTxResult). */
348450013115SPeter Maydell void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
348550013115SPeter Maydell {
348650013115SPeter Maydell     address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3487aab33094Sbellard }
3488aab33094Sbellard 
/* warning: addr must be aligned */
/*
 * Common implementation for the 16-bit store helpers.
 *
 * Translates @addr in @as and either dispatches an MMIO write (possibly
 * byte-swapping @val so the device sees the endianness requested by
 * @endian) or stores directly into guest RAM.  The transaction result is
 * written to *@result when @result is non-NULL.
 */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;          /* access size in bytes; translate may shrink it */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    /* RCU protects the FlatView/MemoryRegion returned by translate. */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* MMIO path: may need to take the iothread (big QEMU) lock. */
        release_lock |= prepare_mmio_access(mr);

        /* The dispatch below takes @val in target-native order, so swap
         * when the requested device endianness differs from the target's. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Mark the bytes dirty (migration/TCG) after the store itself. */
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    /* Unlock in reverse order of acquisition: BQL first, then RCU. */
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
354350013115SPeter Maydell 
354450013115SPeter Maydell /* 16-bit store in target-native endianness; see
                            * address_space_stw_internal for the semantics. */
354450013115SPeter Maydell void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
354550013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
354650013115SPeter Maydell {
354750013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
354850013115SPeter Maydell                                DEVICE_NATIVE_ENDIAN);
354950013115SPeter Maydell }
355050013115SPeter Maydell 
355150013115SPeter Maydell /* 16-bit little-endian store; see address_space_stw_internal. */
355150013115SPeter Maydell void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
355250013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
355350013115SPeter Maydell {
355450013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
355550013115SPeter Maydell                                DEVICE_LITTLE_ENDIAN);
355650013115SPeter Maydell }
355750013115SPeter Maydell 
355850013115SPeter Maydell /* 16-bit big-endian store; see address_space_stw_internal. */
355850013115SPeter Maydell void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
355950013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
356050013115SPeter Maydell {
356150013115SPeter Maydell     address_space_stw_internal(as, addr, val, attrs, result,
356250013115SPeter Maydell                                DEVICE_BIG_ENDIAN);
3563aab33094Sbellard }
3564aab33094Sbellard 
35655ce5944dSEdgar E. Iglesias /* Store a 16-bit value at @addr in @as using the target's native
                               * endianness; transaction failures are ignored (no MemTxResult). */
35655ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35661e78bcc1SAlexander Graf {
356750013115SPeter Maydell     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35681e78bcc1SAlexander Graf }
35691e78bcc1SAlexander Graf 
35705ce5944dSEdgar E. Iglesias /* Store a 16-bit little-endian value at @addr in @as; transaction
                               * failures are ignored (no MemTxResult). */
35705ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35711e78bcc1SAlexander Graf {
357250013115SPeter Maydell     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35731e78bcc1SAlexander Graf }
35741e78bcc1SAlexander Graf 
35755ce5944dSEdgar E. Iglesias /* Store a 16-bit big-endian value at @addr in @as; transaction
                               * failures are ignored (no MemTxResult). */
35755ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
35761e78bcc1SAlexander Graf {
357750013115SPeter Maydell     address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
35781e78bcc1SAlexander Graf }
35791e78bcc1SAlexander Graf 
3580aab33094Sbellard /* XXX: optimize */
358150013115SPeter Maydell void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
358250013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
358350013115SPeter Maydell {
358450013115SPeter Maydell     MemTxResult r;
358550013115SPeter Maydell     val = tswap64(val);
358650013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
358750013115SPeter Maydell     if (result) {
358850013115SPeter Maydell         *result = r;
358950013115SPeter Maydell     }
359050013115SPeter Maydell }
359150013115SPeter Maydell 
359250013115SPeter Maydell void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
359350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
359450013115SPeter Maydell {
359550013115SPeter Maydell     MemTxResult r;
359650013115SPeter Maydell     val = cpu_to_le64(val);
359750013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
359850013115SPeter Maydell     if (result) {
359950013115SPeter Maydell         *result = r;
360050013115SPeter Maydell     }
360150013115SPeter Maydell }
360250013115SPeter Maydell void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
360350013115SPeter Maydell                        MemTxAttrs attrs, MemTxResult *result)
360450013115SPeter Maydell {
360550013115SPeter Maydell     MemTxResult r;
360650013115SPeter Maydell     val = cpu_to_be64(val);
360750013115SPeter Maydell     r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
360850013115SPeter Maydell     if (result) {
360950013115SPeter Maydell         *result = r;
361050013115SPeter Maydell     }
361150013115SPeter Maydell }
361250013115SPeter Maydell 
3613f606604fSEdgar E. Iglesias /* Store a 64-bit value at @addr in @as using the target's native
                               * endianness; transaction failures are ignored (no MemTxResult). */
3613f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3614aab33094Sbellard {
361550013115SPeter Maydell     address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3616aab33094Sbellard }
3617aab33094Sbellard 
3618f606604fSEdgar E. Iglesias /* Store a 64-bit little-endian value at @addr in @as; transaction
                               * failures are ignored (no MemTxResult). */
3618f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
36191e78bcc1SAlexander Graf {
362050013115SPeter Maydell     address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
36211e78bcc1SAlexander Graf }
36221e78bcc1SAlexander Graf 
3623f606604fSEdgar E. Iglesias /* Store a 64-bit big-endian value at @addr in @as; transaction
                               * failures are ignored (no MemTxResult). */
3623f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
36241e78bcc1SAlexander Graf {
362550013115SPeter Maydell     address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
36261e78bcc1SAlexander Graf }
36271e78bcc1SAlexander Graf 
36285e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
3629f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3630b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
363113eb76e0Sbellard {
363213eb76e0Sbellard     int l;
3633a8170e5eSAvi Kivity     hwaddr phys_addr;
36349b3c35e0Sj_mayer     target_ulong page;
363513eb76e0Sbellard 
363613eb76e0Sbellard     while (len > 0) {
36375232e4c7SPeter Maydell         int asidx;
36385232e4c7SPeter Maydell         MemTxAttrs attrs;
36395232e4c7SPeter Maydell 
364013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
36415232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
36425232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
364313eb76e0Sbellard         /* if no physical page mapped, return an error */
364413eb76e0Sbellard         if (phys_addr == -1)
364513eb76e0Sbellard             return -1;
364613eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
364713eb76e0Sbellard         if (l > len)
364813eb76e0Sbellard             l = len;
36495e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
36502e38847bSEdgar E. Iglesias         if (is_write) {
36515232e4c7SPeter Maydell             cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
36525232e4c7SPeter Maydell                                           phys_addr, buf, l);
36532e38847bSEdgar E. Iglesias         } else {
36545232e4c7SPeter Maydell             address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
36555232e4c7SPeter Maydell                              MEMTXATTRS_UNSPECIFIED,
36565c9eb028SPeter Maydell                              buf, l, 0);
36572e38847bSEdgar E. Iglesias         }
365813eb76e0Sbellard         len -= l;
365913eb76e0Sbellard         buf += l;
366013eb76e0Sbellard         addr += l;
366113eb76e0Sbellard     }
366213eb76e0Sbellard     return 0;
366313eb76e0Sbellard }
3664038629a6SDr. David Alan Gilbert 
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 *
 * Exposes the compile-time TARGET_PAGE_BITS constant as a runtime value so
 * target-independent translation units can link against it.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
3673038629a6SDr. David Alan Gilbert 
3674a68fe89cSPaul Brook #endif
367513eb76e0Sbellard 
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 *
 * Returns true iff the build target uses big-endian word order.
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const bool is_big_endian = true;
#else
    const bool is_big_endian = false;
#endif
    return is_big_endian;
}
36898e4a424bSBlue Swirl 
369076f35538SWen Congyang #ifndef CONFIG_USER_ONLY
3691a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
369276f35538SWen Congyang {
36935c8a00ceSPaolo Bonzini     MemoryRegion*mr;
3694149f54b5SPaolo Bonzini     hwaddr l = 1;
369541063e1eSPaolo Bonzini     bool res;
369676f35538SWen Congyang 
369741063e1eSPaolo Bonzini     rcu_read_lock();
36985c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3699149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
370076f35538SWen Congyang 
370141063e1eSPaolo Bonzini     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
370241063e1eSPaolo Bonzini     rcu_read_unlock();
370341063e1eSPaolo Bonzini     return res;
370476f35538SWen Congyang }
3705bd2fa51fSMichael R. Hines 
3706e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3707bd2fa51fSMichael R. Hines {
3708bd2fa51fSMichael R. Hines     RAMBlock *block;
3709e3807054SDr. David Alan Gilbert     int ret = 0;
3710bd2fa51fSMichael R. Hines 
37110dc3f44aSMike Day     rcu_read_lock();
37120dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3713e3807054SDr. David Alan Gilbert         ret = func(block->idstr, block->host, block->offset,
3714e3807054SDr. David Alan Gilbert                    block->used_length, opaque);
3715e3807054SDr. David Alan Gilbert         if (ret) {
3716e3807054SDr. David Alan Gilbert             break;
3717e3807054SDr. David Alan Gilbert         }
3718bd2fa51fSMichael R. Hines     }
37190dc3f44aSMike Day     rcu_read_unlock();
3720e3807054SDr. David Alan Gilbert     return ret;
3721bd2fa51fSMichael R. Hines }
3722ec3f8c99SPeter Maydell #endif
3723