xref: /qemu/system/physmem.c (revision 24bec42f3d6eae035d5df48c057157f83b260e17)
154936004Sbellard /*
2d9f24bf5SPaolo Bonzini  * RAM allocation and memory access
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
961f3c91aSChetan Pant  * version 2.1 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1914a48c1dSMarkus Armbruster 
207b31bbc2SPeter Maydell #include "qemu/osdep.h"
21ec5f7ca8SMarc-André Lureau #include "exec/page-vary.h"
22da34e65cSMarkus Armbruster #include "qapi/error.h"
2354936004Sbellard 
24f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
25084cfca1SRichard Henderson #include "qemu/cacheflush.h"
26e2c1c34fSMarkus Armbruster #include "qemu/hbitmap.h"
27b85ea5faSPeter Maydell #include "qemu/madvise.h"
28d5e26819SPhilippe Mathieu-Daudé #include "qemu/lockable.h"
2978271684SClaudio Fontana 
3078271684SClaudio Fontana #ifdef CONFIG_TCG
3178271684SClaudio Fontana #include "hw/core/tcg-cpu-ops.h"
3278271684SClaudio Fontana #endif /* CONFIG_TCG */
3378271684SClaudio Fontana 
3463c91552SPaolo Bonzini #include "exec/exec-all.h"
3574781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h"
3651180423SJuan Quintela #include "exec/target_page.h"
37741da0d3SPaolo Bonzini #include "hw/qdev-core.h"
38c7e002c5SFam Zheng #include "hw/qdev-properties.h"
3947c8ca53SMarcel Apfelbaum #include "hw/boards.h"
405d5bb9c8SPhilippe Mathieu-Daudé #include "sysemu/xen.h"
419c17d615SPaolo Bonzini #include "sysemu/kvm.h"
4214a48c1dSMarkus Armbruster #include "sysemu/tcg.h"
43a028edeaSAlexander Bulekov #include "sysemu/qtest.h"
441de7afc9SPaolo Bonzini #include "qemu/timer.h"
451de7afc9SPaolo Bonzini #include "qemu/config-file.h"
4675a34036SAndreas Färber #include "qemu/error-report.h"
47b6b71cb5SMarkus Armbruster #include "qemu/qemu-print.h"
483ab6fdc9SPhilippe Mathieu-Daudé #include "qemu/log.h"
495df022cfSPeter Maydell #include "qemu/memalign.h"
50741da0d3SPaolo Bonzini #include "exec/memory.h"
51df43d49cSPaolo Bonzini #include "exec/ioport.h"
52741da0d3SPaolo Bonzini #include "sysemu/dma.h"
53b58c5c2dSMarkus Armbruster #include "sysemu/hostmem.h"
5479ca7a1bSChristian Borntraeger #include "sysemu/hw_accel.h"
559c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
56d44fe13bSAlex Bennée #include "trace.h"
57d3a5038cSDr. David Alan Gilbert 
58e2fa71f5SDr. David Alan Gilbert #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
59e2fa71f5SDr. David Alan Gilbert #include <linux/falloc.h>
60e2fa71f5SDr. David Alan Gilbert #endif
61e2fa71f5SDr. David Alan Gilbert 
620dc3f44aSMike Day #include "qemu/rcu_queue.h"
634840f10eSJan Kiszka #include "qemu/main-loop.h"
643b9bd3f4SPaolo Bonzini #include "exec/translate-all.h"
657615936eSPavel Dovgalyuk #include "sysemu/replay.h"
660cac1b66SBlue Swirl 
67022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
68220c3ebdSJuan Quintela #include "exec/ram_addr.h"
6967d95c15SAvi Kivity 
7061c490e2SBeata Michalska #include "qemu/pmem.h"
7161c490e2SBeata Michalska 
729dfeca7cSBharata B Rao #include "migration/vmstate.h"
739dfeca7cSBharata B Rao 
74b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
75794e8f30SMichael S. Tsirkin #ifndef _WIN32
76794e8f30SMichael S. Tsirkin #include "qemu/mmap-alloc.h"
77794e8f30SMichael S. Tsirkin #endif
78b35ba30fSMichael S. Tsirkin 
79be9b23c4SPeter Xu #include "monitor/monitor.h"
80be9b23c4SPeter Xu 
81ce317be9SJingqi Liu #ifdef CONFIG_LIBDAXCTL
82ce317be9SJingqi Liu #include <daxctl/libdaxctl.h>
83ce317be9SJingqi Liu #endif
84ce317be9SJingqi Liu 
85db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
861196be37Sths 
870dc3f44aSMike Day /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
880dc3f44aSMike Day  * are protected by the ramlist lock.
890dc3f44aSMike Day  */
900d53d9feSMike Day RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
9162152b8aSAvi Kivity 
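/*
 * Illustrative sketch (not part of the original file): how a reader is
 * expected to walk ram_list under the rule stated above.  Only the RCU read
 * lock is taken; the ramlist mutex is needed by writers only.  The helper
 * name is hypothetical.
 */
static inline uint64_t example_count_ram_pages(void)
{
    RAMBlock *block;
    uint64_t pages = 0;

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        pages += block->used_length >> TARGET_PAGE_BITS;
    }
    return pages;
}
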
9262152b8aSAvi Kivity static MemoryRegion *system_memory;
93309cb471SAvi Kivity static MemoryRegion *system_io;
9462152b8aSAvi Kivity 
95f6790af6SAvi Kivity AddressSpace address_space_io;
96f6790af6SAvi Kivity AddressSpace address_space_memory;
972673a5daSAvi Kivity 
98acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
994346ae3eSAvi Kivity 
1001db8abb1SPaolo Bonzini typedef struct PhysPageEntry PhysPageEntry;
1011db8abb1SPaolo Bonzini 
1021db8abb1SPaolo Bonzini struct PhysPageEntry {
1039736e55bSMichael S. Tsirkin     /* How many levels to skip to the next node; each level covers P_L2_BITS address bits. 0 for a leaf. */
1048b795765SMichael S. Tsirkin     uint32_t skip : 6;
1059736e55bSMichael S. Tsirkin      /* index into phys_sections (!skip) or phys_map_nodes (skip) */
1068b795765SMichael S. Tsirkin     uint32_t ptr : 26;
1071db8abb1SPaolo Bonzini };
1081db8abb1SPaolo Bonzini 
1098b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
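/* Note: (((uint32_t)~0) >> 6) == 0x03ffffff, the largest value the 26-bit
 * ptr field above can hold; it is reserved as the nil sentinel. */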
1108b795765SMichael S. Tsirkin 
11103f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
11257271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
11303f49957SPaolo Bonzini 
114026736ceSMichael S. Tsirkin #define P_L2_BITS 9
11503f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
11603f49957SPaolo Bonzini 
11703f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
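/*
 * For example, with 4 KiB target pages (TARGET_PAGE_BITS == 12) this is
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry nodes, enough to cover
 * the whole 64-bit address space.
 */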
11803f49957SPaolo Bonzini 
11903f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1200475d94fSPaolo Bonzini 
12153cb28cbSMarcel Apfelbaum typedef struct PhysPageMap {
12279e2b9aeSPaolo Bonzini     struct rcu_head rcu;
12379e2b9aeSPaolo Bonzini 
12453cb28cbSMarcel Apfelbaum     unsigned sections_nb;
12553cb28cbSMarcel Apfelbaum     unsigned sections_nb_alloc;
12653cb28cbSMarcel Apfelbaum     unsigned nodes_nb;
12753cb28cbSMarcel Apfelbaum     unsigned nodes_nb_alloc;
12853cb28cbSMarcel Apfelbaum     Node *nodes;
12953cb28cbSMarcel Apfelbaum     MemoryRegionSection *sections;
13053cb28cbSMarcel Apfelbaum } PhysPageMap;
13153cb28cbSMarcel Apfelbaum 
1321db8abb1SPaolo Bonzini struct AddressSpaceDispatch {
133729633c2SFam Zheng     MemoryRegionSection *mru_section;
1341db8abb1SPaolo Bonzini     /* This is a multi-level map on the physical address space.
1351db8abb1SPaolo Bonzini      * The bottom level has pointers to MemoryRegionSections.
1361db8abb1SPaolo Bonzini      */
1371db8abb1SPaolo Bonzini     PhysPageEntry phys_map;
13853cb28cbSMarcel Apfelbaum     PhysPageMap map;
1391db8abb1SPaolo Bonzini };
1401db8abb1SPaolo Bonzini 
14190260c6cSJan Kiszka #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
14290260c6cSJan Kiszka typedef struct subpage_t {
14390260c6cSJan Kiszka     MemoryRegion iomem;
14416620684SAlexey Kardashevskiy     FlatView *fv;
14590260c6cSJan Kiszka     hwaddr base;
1462615fabdSVijaya Kumar K     uint16_t sub_section[];
14790260c6cSJan Kiszka } subpage_t;
14890260c6cSJan Kiszka 
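/*
 * Note: subpage_init() (below) allocates sub_section[] with TARGET_PAGE_SIZE
 * entries, i.e. one section index per byte of the page, which is how several
 * MemoryRegions can share a single target page.
 */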
149b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
1505312bd8bSAvi Kivity 
151e2eef170Spbrook static void io_mem_init(void);
15262152b8aSAvi Kivity static void memory_map_init(void);
1539458a9a1SPaolo Bonzini static void tcg_log_global_after_sync(MemoryListener *listener);
15409daed84SEdgar E. Iglesias static void tcg_commit(MemoryListener *listener);
155e2eef170Spbrook 
15632857f4dSPeter Maydell /**
15732857f4dSPeter Maydell  * CPUAddressSpace: all the information a CPU needs about an AddressSpace
15832857f4dSPeter Maydell  * @cpu: the CPU whose AddressSpace this is
15932857f4dSPeter Maydell  * @as: the AddressSpace itself
16032857f4dSPeter Maydell  * @memory_dispatch: its dispatch pointer (cached, RCU protected)
16132857f4dSPeter Maydell  * @tcg_as_listener: listener for tracking changes to the AddressSpace
16232857f4dSPeter Maydell  */
16315d62536SPaolo Bonzini typedef struct CPUAddressSpace {
16432857f4dSPeter Maydell     CPUState *cpu;
16532857f4dSPeter Maydell     AddressSpace *as;
16632857f4dSPeter Maydell     struct AddressSpaceDispatch *memory_dispatch;
16732857f4dSPeter Maydell     MemoryListener tcg_as_listener;
16815d62536SPaolo Bonzini } CPUAddressSpace;
16932857f4dSPeter Maydell 
1708deaf12cSGerd Hoffmann struct DirtyBitmapSnapshot {
1718deaf12cSGerd Hoffmann     ram_addr_t start;
1728deaf12cSGerd Hoffmann     ram_addr_t end;
1738deaf12cSGerd Hoffmann     unsigned long dirty[];
1748deaf12cSGerd Hoffmann };
1758deaf12cSGerd Hoffmann 
17653cb28cbSMarcel Apfelbaum static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
177f7bf5461SAvi Kivity {
178101420b8SPeter Lieven     static unsigned alloc_hint = 16;
17953cb28cbSMarcel Apfelbaum     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
180c95cfd04SWei Yang         map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
18153cb28cbSMarcel Apfelbaum         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
182101420b8SPeter Lieven         alloc_hint = map->nodes_nb_alloc;
183f7bf5461SAvi Kivity     }
184f7bf5461SAvi Kivity }
185f7bf5461SAvi Kivity 
186db94604bSPaolo Bonzini static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
187d6f2ea22SAvi Kivity {
188d6f2ea22SAvi Kivity     unsigned i;
1898b795765SMichael S. Tsirkin     uint32_t ret;
190db94604bSPaolo Bonzini     PhysPageEntry e;
191db94604bSPaolo Bonzini     PhysPageEntry *p;
192d6f2ea22SAvi Kivity 
19353cb28cbSMarcel Apfelbaum     ret = map->nodes_nb++;
194db94604bSPaolo Bonzini     p = map->nodes[ret];
195d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
19653cb28cbSMarcel Apfelbaum     assert(ret != map->nodes_nb_alloc);
197db94604bSPaolo Bonzini 
198db94604bSPaolo Bonzini     e.skip = leaf ? 0 : 1;
199db94604bSPaolo Bonzini     e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
20003f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
201db94604bSPaolo Bonzini         memcpy(&p[i], &e, sizeof(e));
202d6f2ea22SAvi Kivity     }
203f7bf5461SAvi Kivity     return ret;
204d6f2ea22SAvi Kivity }
205d6f2ea22SAvi Kivity 
20653cb28cbSMarcel Apfelbaum static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
20756b15076SWei Yang                                 hwaddr *index, uint64_t *nb, uint16_t leaf,
2082999097bSAvi Kivity                                 int level)
20992e873b9Sbellard {
210f7bf5461SAvi Kivity     PhysPageEntry *p;
21103f49957SPaolo Bonzini     hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
2125cd2c5b6SRichard Henderson 
2139736e55bSMichael S. Tsirkin     if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
214db94604bSPaolo Bonzini         lp->ptr = phys_map_node_alloc(map, level == 0);
215db94604bSPaolo Bonzini     }
21653cb28cbSMarcel Apfelbaum     p = map->nodes[lp->ptr];
21703f49957SPaolo Bonzini     lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
218f7bf5461SAvi Kivity 
21903f49957SPaolo Bonzini     while (*nb && lp < &p[P_L2_SIZE]) {
22007f07b31SAvi Kivity         if ((*index & (step - 1)) == 0 && *nb >= step) {
2219736e55bSMichael S. Tsirkin             lp->skip = 0;
222c19e8800SAvi Kivity             lp->ptr = leaf;
22307f07b31SAvi Kivity             *index += step;
22407f07b31SAvi Kivity             *nb -= step;
225f7bf5461SAvi Kivity         } else {
22653cb28cbSMarcel Apfelbaum             phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2272999097bSAvi Kivity         }
2282999097bSAvi Kivity         ++lp;
229f7bf5461SAvi Kivity     }
2304346ae3eSAvi Kivity }
2315cd2c5b6SRichard Henderson 
232ac1970fbSAvi Kivity static void phys_page_set(AddressSpaceDispatch *d,
23356b15076SWei Yang                           hwaddr index, uint64_t nb,
2342999097bSAvi Kivity                           uint16_t leaf)
235f7bf5461SAvi Kivity {
2362999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
23753cb28cbSMarcel Apfelbaum     phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
238f7bf5461SAvi Kivity 
23953cb28cbSMarcel Apfelbaum     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
24092e873b9Sbellard }
24192e873b9Sbellard 
242b35ba30fSMichael S. Tsirkin /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
243b35ba30fSMichael S. Tsirkin  * and update our entry so we can skip it and go directly to the destination.
244b35ba30fSMichael S. Tsirkin  */
245efee678dSMarc-André Lureau static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
246b35ba30fSMichael S. Tsirkin {
247b35ba30fSMichael S. Tsirkin     unsigned valid_ptr = P_L2_SIZE;
248b35ba30fSMichael S. Tsirkin     int valid = 0;
249b35ba30fSMichael S. Tsirkin     PhysPageEntry *p;
250b35ba30fSMichael S. Tsirkin     int i;
251b35ba30fSMichael S. Tsirkin 
252b35ba30fSMichael S. Tsirkin     if (lp->ptr == PHYS_MAP_NODE_NIL) {
253b35ba30fSMichael S. Tsirkin         return;
254b35ba30fSMichael S. Tsirkin     }
255b35ba30fSMichael S. Tsirkin 
256b35ba30fSMichael S. Tsirkin     p = nodes[lp->ptr];
257b35ba30fSMichael S. Tsirkin     for (i = 0; i < P_L2_SIZE; i++) {
258b35ba30fSMichael S. Tsirkin         if (p[i].ptr == PHYS_MAP_NODE_NIL) {
259b35ba30fSMichael S. Tsirkin             continue;
260b35ba30fSMichael S. Tsirkin         }
261b35ba30fSMichael S. Tsirkin 
262b35ba30fSMichael S. Tsirkin         valid_ptr = i;
263b35ba30fSMichael S. Tsirkin         valid++;
264b35ba30fSMichael S. Tsirkin         if (p[i].skip) {
265efee678dSMarc-André Lureau             phys_page_compact(&p[i], nodes);
266b35ba30fSMichael S. Tsirkin         }
267b35ba30fSMichael S. Tsirkin     }
268b35ba30fSMichael S. Tsirkin 
269b35ba30fSMichael S. Tsirkin     /* We can only compress if there's only one child. */
270b35ba30fSMichael S. Tsirkin     if (valid != 1) {
271b35ba30fSMichael S. Tsirkin         return;
272b35ba30fSMichael S. Tsirkin     }
273b35ba30fSMichael S. Tsirkin 
274b35ba30fSMichael S. Tsirkin     assert(valid_ptr < P_L2_SIZE);
275b35ba30fSMichael S. Tsirkin 
276b35ba30fSMichael S. Tsirkin     /* Don't compress if it won't fit in the # of bits we have. */
277526ca236SWei Yang     if (P_L2_LEVELS >= (1 << 6) &&
278526ca236SWei Yang         lp->skip + p[valid_ptr].skip >= (1 << 6)) {
279b35ba30fSMichael S. Tsirkin         return;
280b35ba30fSMichael S. Tsirkin     }
281b35ba30fSMichael S. Tsirkin 
282b35ba30fSMichael S. Tsirkin     lp->ptr = p[valid_ptr].ptr;
283b35ba30fSMichael S. Tsirkin     if (!p[valid_ptr].skip) {
284b35ba30fSMichael S. Tsirkin         /* If our only child is a leaf, make this a leaf. */
285b35ba30fSMichael S. Tsirkin         /* By design, we should have made this node a leaf to begin with so we
286b35ba30fSMichael S. Tsirkin          * should never reach here.
287b35ba30fSMichael S. Tsirkin          * But since it's so simple to handle this, let's do it just in case we
288b35ba30fSMichael S. Tsirkin          * change this rule.
289b35ba30fSMichael S. Tsirkin          */
290b35ba30fSMichael S. Tsirkin         lp->skip = 0;
291b35ba30fSMichael S. Tsirkin     } else {
292b35ba30fSMichael S. Tsirkin         lp->skip += p[valid_ptr].skip;
293b35ba30fSMichael S. Tsirkin     }
294b35ba30fSMichael S. Tsirkin }
295b35ba30fSMichael S. Tsirkin 
2968629d3fcSAlexey Kardashevskiy void address_space_dispatch_compact(AddressSpaceDispatch *d)
297b35ba30fSMichael S. Tsirkin {
298b35ba30fSMichael S. Tsirkin     if (d->phys_map.skip) {
299efee678dSMarc-André Lureau         phys_page_compact(&d->phys_map, d->map.nodes);
300b35ba30fSMichael S. Tsirkin     }
301b35ba30fSMichael S. Tsirkin }
302b35ba30fSMichael S. Tsirkin 
30329cb533dSFam Zheng static inline bool section_covers_addr(const MemoryRegionSection *section,
30429cb533dSFam Zheng                                        hwaddr addr)
30529cb533dSFam Zheng {
30629cb533dSFam Zheng     /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
30729cb533dSFam Zheng      * the section must cover the entire address space.
30829cb533dSFam Zheng      */
309258dfaaaSRichard Henderson     return int128_gethi(section->size) ||
31029cb533dSFam Zheng            range_covers_byte(section->offset_within_address_space,
311258dfaaaSRichard Henderson                              int128_getlo(section->size), addr);
31229cb533dSFam Zheng }
31329cb533dSFam Zheng 
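/*
 * Note: a section covering the entire space has size 2^64, whose low 64 bits
 * are zero, so range_covers_byte() alone would reject every address; the
 * explicit int128_gethi() test above is what makes such a section match.
 */
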
314003a0cf2SPeter Xu static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
31592e873b9Sbellard {
316003a0cf2SPeter Xu     PhysPageEntry lp = d->phys_map, *p;
317003a0cf2SPeter Xu     Node *nodes = d->map.nodes;
318003a0cf2SPeter Xu     MemoryRegionSection *sections = d->map.sections;
31997115a8dSMichael S. Tsirkin     hwaddr index = addr >> TARGET_PAGE_BITS;
32031ab2b4aSAvi Kivity     int i;
321f1f6e3b8SAvi Kivity 
3229736e55bSMichael S. Tsirkin     for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
323c19e8800SAvi Kivity         if (lp.ptr == PHYS_MAP_NODE_NIL) {
3249affd6fcSPaolo Bonzini             return &sections[PHYS_SECTION_UNASSIGNED];
325f1f6e3b8SAvi Kivity         }
3269affd6fcSPaolo Bonzini         p = nodes[lp.ptr];
32703f49957SPaolo Bonzini         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
32831ab2b4aSAvi Kivity     }
329b35ba30fSMichael S. Tsirkin 
33029cb533dSFam Zheng     if (section_covers_addr(&sections[lp.ptr], addr)) {
3319affd6fcSPaolo Bonzini         return &sections[lp.ptr];
332b35ba30fSMichael S. Tsirkin     } else {
333b35ba30fSMichael S. Tsirkin         return &sections[PHYS_SECTION_UNASSIGNED];
334b35ba30fSMichael S. Tsirkin     }
335f3705d53SAvi Kivity }
336f3705d53SAvi Kivity 
33779e2b9aeSPaolo Bonzini /* Called from RCU critical section */
338c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
33990260c6cSJan Kiszka                                                         hwaddr addr,
34090260c6cSJan Kiszka                                                         bool resolve_subpage)
3419f029603SJan Kiszka {
342d73415a3SStefan Hajnoczi     MemoryRegionSection *section = qatomic_read(&d->mru_section);
34390260c6cSJan Kiszka     subpage_t *subpage;
34490260c6cSJan Kiszka 
34507c114bbSPaolo Bonzini     if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
34607c114bbSPaolo Bonzini         !section_covers_addr(section, addr)) {
347003a0cf2SPeter Xu         section = phys_page_find(d, addr);
348d73415a3SStefan Hajnoczi         qatomic_set(&d->mru_section, section);
349729633c2SFam Zheng     }
35090260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
35190260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
35253cb28cbSMarcel Apfelbaum         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
35390260c6cSJan Kiszka     }
35490260c6cSJan Kiszka     return section;
3559f029603SJan Kiszka }
3569f029603SJan Kiszka 
35779e2b9aeSPaolo Bonzini /* Called from RCU critical section */
35890260c6cSJan Kiszka static MemoryRegionSection *
359c7086b4aSPaolo Bonzini address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
36090260c6cSJan Kiszka                                  hwaddr *plen, bool resolve_subpage)
361149f54b5SPaolo Bonzini {
362149f54b5SPaolo Bonzini     MemoryRegionSection *section;
363965eb2fcSPaolo Bonzini     MemoryRegion *mr;
364a87f3954SPaolo Bonzini     Int128 diff;
365149f54b5SPaolo Bonzini 
366c7086b4aSPaolo Bonzini     section = address_space_lookup_region(d, addr, resolve_subpage);
367149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegionSection */
368149f54b5SPaolo Bonzini     addr -= section->offset_within_address_space;
369149f54b5SPaolo Bonzini 
370149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegion */
371149f54b5SPaolo Bonzini     *xlat = addr + section->offset_within_region;
372149f54b5SPaolo Bonzini 
373965eb2fcSPaolo Bonzini     mr = section->mr;
374b242e0e0SPaolo Bonzini 
375b242e0e0SPaolo Bonzini     /* MMIO registers can be expected to perform full-width accesses based only
376b242e0e0SPaolo Bonzini      * on their address, without considering adjacent registers that could
377b242e0e0SPaolo Bonzini      * decode to completely different MemoryRegions.  When such registers
378b242e0e0SPaolo Bonzini      * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379b242e0e0SPaolo Bonzini      * regions overlap wildly.  For this reason we cannot clamp the accesses
380b242e0e0SPaolo Bonzini      * here.
381b242e0e0SPaolo Bonzini      *
382b242e0e0SPaolo Bonzini      * If the length is small (as is the case for address_space_ldl/stl),
383b242e0e0SPaolo Bonzini      * everything works fine.  If the incoming length is large, however,
384b242e0e0SPaolo Bonzini      * the caller really has to do the clamping through memory_access_size.
385b242e0e0SPaolo Bonzini      */
386965eb2fcSPaolo Bonzini     if (memory_region_is_ram(mr)) {
387e4a511f8SPaolo Bonzini         diff = int128_sub(section->size, int128_make64(addr));
3883752a036SPeter Maydell         *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
389965eb2fcSPaolo Bonzini     }
390149f54b5SPaolo Bonzini     return section;
391149f54b5SPaolo Bonzini }
39290260c6cSJan Kiszka 
393d5e5fafdSPeter Xu /**
394a411c84bSPaolo Bonzini  * address_space_translate_iommu - translate an address through an IOMMU
395a411c84bSPaolo Bonzini  * memory region and then through the target address space.
396a411c84bSPaolo Bonzini  *
397a411c84bSPaolo Bonzini  * @iommu_mr: the IOMMU memory region that we start the translation from
398a411c84bSPaolo Bonzini  * @addr: the address to be translated through the MMU
399a411c84bSPaolo Bonzini  * @xlat: the translated address offset within the destination memory region.
400a411c84bSPaolo Bonzini  *        It cannot be %NULL.
401a411c84bSPaolo Bonzini  * @plen_out: valid read/write length of the translated address. It
402a411c84bSPaolo Bonzini  *            cannot be %NULL.
403a411c84bSPaolo Bonzini  * @page_mask_out: page mask for the translated address. This
404a411c84bSPaolo Bonzini  *            should only be meaningful for IOMMU translated
405a411c84bSPaolo Bonzini  *            addresses, since only this mask can tell whether a huge
406a411c84bSPaolo Bonzini  *            page is mapped. It can be %NULL if we don't care about it.
407a411c84bSPaolo Bonzini  * @is_write: whether the translation operation is for write
408a411c84bSPaolo Bonzini  * @is_mmio: whether this can be MMIO, set true if it can
409a411c84bSPaolo Bonzini  * @target_as: the address space targeted by the IOMMU
4102f7b009cSPeter Maydell  * @attrs: transaction attributes
411a411c84bSPaolo Bonzini  *
412a411c84bSPaolo Bonzini  * This function is called from an RCU critical section.  It is the common
413a411c84bSPaolo Bonzini  * part of flatview_do_translate and address_space_translate_cached.
414a411c84bSPaolo Bonzini  */
415a411c84bSPaolo Bonzini static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
416a411c84bSPaolo Bonzini                                                          hwaddr *xlat,
417a411c84bSPaolo Bonzini                                                          hwaddr *plen_out,
418a411c84bSPaolo Bonzini                                                          hwaddr *page_mask_out,
419a411c84bSPaolo Bonzini                                                          bool is_write,
420a411c84bSPaolo Bonzini                                                          bool is_mmio,
4212f7b009cSPeter Maydell                                                          AddressSpace **target_as,
4222f7b009cSPeter Maydell                                                          MemTxAttrs attrs)
423a411c84bSPaolo Bonzini {
424a411c84bSPaolo Bonzini     MemoryRegionSection *section;
425a411c84bSPaolo Bonzini     hwaddr page_mask = (hwaddr)-1;
426a411c84bSPaolo Bonzini 
427a411c84bSPaolo Bonzini     do {
428a411c84bSPaolo Bonzini         hwaddr addr = *xlat;
429a411c84bSPaolo Bonzini         IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
4302c91bcf2SPeter Maydell         int iommu_idx = 0;
4312c91bcf2SPeter Maydell         IOMMUTLBEntry iotlb;
4322c91bcf2SPeter Maydell 
4332c91bcf2SPeter Maydell         if (imrc->attrs_to_index) {
4342c91bcf2SPeter Maydell             iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
4352c91bcf2SPeter Maydell         }
4362c91bcf2SPeter Maydell 
4372c91bcf2SPeter Maydell         iotlb = imrc->translate(iommu_mr, addr, is_write ?
4382c91bcf2SPeter Maydell                                 IOMMU_WO : IOMMU_RO, iommu_idx);
439a411c84bSPaolo Bonzini 
440a411c84bSPaolo Bonzini         if (!(iotlb.perm & (1 << is_write))) {
441a411c84bSPaolo Bonzini             goto unassigned;
442a411c84bSPaolo Bonzini         }
443a411c84bSPaolo Bonzini 
444a411c84bSPaolo Bonzini         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
445a411c84bSPaolo Bonzini                 | (addr & iotlb.addr_mask));
446a411c84bSPaolo Bonzini         page_mask &= iotlb.addr_mask;
447a411c84bSPaolo Bonzini         *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
448a411c84bSPaolo Bonzini         *target_as = iotlb.target_as;
449a411c84bSPaolo Bonzini 
450a411c84bSPaolo Bonzini         section = address_space_translate_internal(
451a411c84bSPaolo Bonzini                 address_space_to_dispatch(iotlb.target_as), addr, xlat,
452a411c84bSPaolo Bonzini                 plen_out, is_mmio);
453a411c84bSPaolo Bonzini 
454a411c84bSPaolo Bonzini         iommu_mr = memory_region_get_iommu(section->mr);
455a411c84bSPaolo Bonzini     } while (unlikely(iommu_mr));
456a411c84bSPaolo Bonzini 
457a411c84bSPaolo Bonzini     if (page_mask_out) {
458a411c84bSPaolo Bonzini         *page_mask_out = page_mask;
459a411c84bSPaolo Bonzini     }
460a411c84bSPaolo Bonzini     return *section;
461a411c84bSPaolo Bonzini 
462a411c84bSPaolo Bonzini unassigned:
463a411c84bSPaolo Bonzini     return (MemoryRegionSection) { .mr = &io_mem_unassigned };
464a411c84bSPaolo Bonzini }
465a411c84bSPaolo Bonzini 
466a411c84bSPaolo Bonzini /**
467d5e5fafdSPeter Xu  * flatview_do_translate - translate an address in FlatView
468d5e5fafdSPeter Xu  *
469d5e5fafdSPeter Xu  * @fv: the flat view that we want to translate on
470d5e5fafdSPeter Xu  * @addr: the address to be translated in above address space
471d5e5fafdSPeter Xu  * @xlat: the translated address offset within memory region. It
472d5e5fafdSPeter Xu  *        cannot be %NULL.
473d5e5fafdSPeter Xu  * @plen_out: valid read/write length of the translated address. It
474d5e5fafdSPeter Xu  *            can be %NULL when we don't care about it.
475d5e5fafdSPeter Xu  * @page_mask_out: page mask for the translated address. This
476d5e5fafdSPeter Xu  *            should only be meaningful for IOMMU translated
477d5e5fafdSPeter Xu  *            addresses, since only this mask can tell whether a huge
478d5e5fafdSPeter Xu  *            page is mapped. It can be %NULL if we don't care about it.
479d5e5fafdSPeter Xu  * @is_write: whether the translation operation is for write
480d5e5fafdSPeter Xu  * @is_mmio: whether this can be MMIO, set true if it can
481ad2804d9SPaolo Bonzini  * @target_as: the address space targeted by the IOMMU
48249e14aa8SPeter Maydell  * @attrs: memory transaction attributes
483d5e5fafdSPeter Xu  *
484d5e5fafdSPeter Xu  * This function is called from an RCU critical section.
485d5e5fafdSPeter Xu  */
48616620684SAlexey Kardashevskiy static MemoryRegionSection flatview_do_translate(FlatView *fv,
487a764040cSPeter Xu                                                  hwaddr addr,
488a764040cSPeter Xu                                                  hwaddr *xlat,
489d5e5fafdSPeter Xu                                                  hwaddr *plen_out,
490d5e5fafdSPeter Xu                                                  hwaddr *page_mask_out,
491a764040cSPeter Xu                                                  bool is_write,
492e76bb18fSAlexey Kardashevskiy                                                  bool is_mmio,
49349e14aa8SPeter Maydell                                                  AddressSpace **target_as,
49449e14aa8SPeter Maydell                                                  MemTxAttrs attrs)
49590260c6cSJan Kiszka {
49630951157SAvi Kivity     MemoryRegionSection *section;
4973df9d748SAlexey Kardashevskiy     IOMMUMemoryRegion *iommu_mr;
498d5e5fafdSPeter Xu     hwaddr plen = (hwaddr)(-1);
499d5e5fafdSPeter Xu 
500ad2804d9SPaolo Bonzini     if (!plen_out) {
501ad2804d9SPaolo Bonzini         plen_out = &plen;
502d5e5fafdSPeter Xu     }
50330951157SAvi Kivity 
50416620684SAlexey Kardashevskiy     section = address_space_translate_internal(
505ad2804d9SPaolo Bonzini             flatview_to_dispatch(fv), addr, xlat,
506ad2804d9SPaolo Bonzini             plen_out, is_mmio);
50730951157SAvi Kivity 
5083df9d748SAlexey Kardashevskiy     iommu_mr = memory_region_get_iommu(section->mr);
509a411c84bSPaolo Bonzini     if (unlikely(iommu_mr)) {
510a411c84bSPaolo Bonzini         return address_space_translate_iommu(iommu_mr, xlat,
511a411c84bSPaolo Bonzini                                              plen_out, page_mask_out,
512a411c84bSPaolo Bonzini                                              is_write, is_mmio,
5132f7b009cSPeter Maydell                                              target_as, attrs);
51430951157SAvi Kivity     }
515ad2804d9SPaolo Bonzini     if (page_mask_out) {
516d5e5fafdSPeter Xu         /* Not behind an IOMMU, use default page size. */
517a411c84bSPaolo Bonzini         *page_mask_out = ~TARGET_PAGE_MASK;
518d5e5fafdSPeter Xu     }
519d5e5fafdSPeter Xu 
520a764040cSPeter Xu     return *section;
521a764040cSPeter Xu }
522a764040cSPeter Xu 
523a764040cSPeter Xu /* Called from RCU critical section */
524a764040cSPeter Xu IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
5257446eb07SPeter Maydell                                             bool is_write, MemTxAttrs attrs)
526a764040cSPeter Xu {
527a764040cSPeter Xu     MemoryRegionSection section;
528076a93d7SPeter Xu     hwaddr xlat, page_mask;
529a764040cSPeter Xu 
530076a93d7SPeter Xu     /*
531076a93d7SPeter Xu      * This can never be MMIO, and we don't really care about plen,
532076a93d7SPeter Xu      * but page mask.
533076a93d7SPeter Xu      */
534076a93d7SPeter Xu     section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
53549e14aa8SPeter Maydell                                     NULL, &page_mask, is_write, false, &as,
53649e14aa8SPeter Maydell                                     attrs);
537a764040cSPeter Xu 
538a764040cSPeter Xu     /* Illegal translation */
539a764040cSPeter Xu     if (section.mr == &io_mem_unassigned) {
540a764040cSPeter Xu         goto iotlb_fail;
541a764040cSPeter Xu     }
542a764040cSPeter Xu 
543a764040cSPeter Xu     /* Convert memory region offset into address space offset */
544a764040cSPeter Xu     xlat += section.offset_within_address_space -
545a764040cSPeter Xu         section.offset_within_region;
546a764040cSPeter Xu 
547a764040cSPeter Xu     return (IOMMUTLBEntry) {
548e76bb18fSAlexey Kardashevskiy         .target_as = as,
549076a93d7SPeter Xu         .iova = addr & ~page_mask,
550076a93d7SPeter Xu         .translated_addr = xlat & ~page_mask,
551076a93d7SPeter Xu         .addr_mask = page_mask,
552a764040cSPeter Xu         /* IOTLBs are for DMA, and DMA is only allowed on RAM. */
553a764040cSPeter Xu         .perm = IOMMU_RW,
554a764040cSPeter Xu     };
555a764040cSPeter Xu 
556a764040cSPeter Xu iotlb_fail:
557a764040cSPeter Xu     return (IOMMUTLBEntry) {0};
558a764040cSPeter Xu }
559a764040cSPeter Xu 
560a764040cSPeter Xu /* Called from RCU critical section */
56116620684SAlexey Kardashevskiy MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
562efa99a2fSPeter Maydell                                  hwaddr *plen, bool is_write,
563efa99a2fSPeter Maydell                                  MemTxAttrs attrs)
564a764040cSPeter Xu {
565a764040cSPeter Xu     MemoryRegion *mr;
566a764040cSPeter Xu     MemoryRegionSection section;
56716620684SAlexey Kardashevskiy     AddressSpace *as = NULL;
568a764040cSPeter Xu 
569a764040cSPeter Xu     /* This can be MMIO, so setup MMIO bit. */
570d5e5fafdSPeter Xu     section = flatview_do_translate(fv, addr, xlat, plen, NULL,
57149e14aa8SPeter Maydell                                     is_write, true, &as, attrs);
572a764040cSPeter Xu     mr = section.mr;
573a764040cSPeter Xu 
574fe680d0dSAlexey Kardashevskiy     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
575a87f3954SPaolo Bonzini         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
57623820dbfSPeter Crosthwaite         *plen = MIN(page, *plen);
577a87f3954SPaolo Bonzini     }
578a87f3954SPaolo Bonzini 
57930951157SAvi Kivity     return mr;
58090260c6cSJan Kiszka }
58190260c6cSJan Kiszka 
5821f871c5eSPeter Maydell typedef struct TCGIOMMUNotifier {
5831f871c5eSPeter Maydell     IOMMUNotifier n;
5841f871c5eSPeter Maydell     MemoryRegion *mr;
5851f871c5eSPeter Maydell     CPUState *cpu;
5861f871c5eSPeter Maydell     int iommu_idx;
5871f871c5eSPeter Maydell     bool active;
5881f871c5eSPeter Maydell } TCGIOMMUNotifier;
5891f871c5eSPeter Maydell 
5901f871c5eSPeter Maydell static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
5911f871c5eSPeter Maydell {
5921f871c5eSPeter Maydell     TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
5931f871c5eSPeter Maydell 
5941f871c5eSPeter Maydell     if (!notifier->active) {
5951f871c5eSPeter Maydell         return;
5961f871c5eSPeter Maydell     }
5971f871c5eSPeter Maydell     tlb_flush(notifier->cpu);
5981f871c5eSPeter Maydell     notifier->active = false;
5991f871c5eSPeter Maydell     /* We leave the notifier struct on the list to avoid reallocating it later.
6001f871c5eSPeter Maydell      * Generally the number of IOMMUs a CPU deals with will be small.
6011f871c5eSPeter Maydell      * In any case we can't unregister the iommu notifier from a notify
6021f871c5eSPeter Maydell      * callback.
6031f871c5eSPeter Maydell      */
6041f871c5eSPeter Maydell }
6051f871c5eSPeter Maydell 
6061f871c5eSPeter Maydell static void tcg_register_iommu_notifier(CPUState *cpu,
6071f871c5eSPeter Maydell                                         IOMMUMemoryRegion *iommu_mr,
6081f871c5eSPeter Maydell                                         int iommu_idx)
6091f871c5eSPeter Maydell {
6101f871c5eSPeter Maydell     /* Make sure this CPU has an IOMMU notifier registered for this
6111f871c5eSPeter Maydell      * IOMMU/IOMMU index combination, so that we can flush its TLB
6121f871c5eSPeter Maydell      * when the IOMMU tells us the mappings we've cached have changed.
6131f871c5eSPeter Maydell      */
6141f871c5eSPeter Maydell     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
615bbf90191SPhilippe Mathieu-Daudé     TCGIOMMUNotifier *notifier = NULL;
616805d4496SMarkus Armbruster     int i;
6171f871c5eSPeter Maydell 
6181f871c5eSPeter Maydell     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
6195601be3bSPeter Maydell         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
6201f871c5eSPeter Maydell         if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
6211f871c5eSPeter Maydell             break;
6221f871c5eSPeter Maydell         }
6231f871c5eSPeter Maydell     }
6241f871c5eSPeter Maydell     if (i == cpu->iommu_notifiers->len) {
6251f871c5eSPeter Maydell         /* Not found, add a new entry at the end of the array */
6261f871c5eSPeter Maydell         cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
6275601be3bSPeter Maydell         notifier = g_new0(TCGIOMMUNotifier, 1);
6285601be3bSPeter Maydell         g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;
6291f871c5eSPeter Maydell 
6301f871c5eSPeter Maydell         notifier->mr = mr;
6311f871c5eSPeter Maydell         notifier->iommu_idx = iommu_idx;
6321f871c5eSPeter Maydell         notifier->cpu = cpu;
6331f871c5eSPeter Maydell         /* Rather than trying to register interest in the specific part
6341f871c5eSPeter Maydell          * of the iommu's address space that we've accessed and then
6351f871c5eSPeter Maydell          * expand it later as subsequent accesses touch more of it, we
6361f871c5eSPeter Maydell          * just register interest in the whole thing, on the assumption
6371f871c5eSPeter Maydell          * that iommu reconfiguration will be rare.
6381f871c5eSPeter Maydell          */
6391f871c5eSPeter Maydell         iommu_notifier_init(&notifier->n,
6401f871c5eSPeter Maydell                             tcg_iommu_unmap_notify,
6411f871c5eSPeter Maydell                             IOMMU_NOTIFIER_UNMAP,
6421f871c5eSPeter Maydell                             0,
6431f871c5eSPeter Maydell                             HWADDR_MAX,
6441f871c5eSPeter Maydell                             iommu_idx);
645805d4496SMarkus Armbruster         memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
646805d4496SMarkus Armbruster                                               &error_fatal);
6471f871c5eSPeter Maydell     }
6481f871c5eSPeter Maydell 
6491f871c5eSPeter Maydell     if (!notifier->active) {
6501f871c5eSPeter Maydell         notifier->active = true;
6511f871c5eSPeter Maydell     }
6521f871c5eSPeter Maydell }
6531f871c5eSPeter Maydell 
654d9f24bf5SPaolo Bonzini void tcg_iommu_free_notifier_list(CPUState *cpu)
6551f871c5eSPeter Maydell {
6561f871c5eSPeter Maydell     /* Destroy the CPU's notifier list */
6571f871c5eSPeter Maydell     int i;
6581f871c5eSPeter Maydell     TCGIOMMUNotifier *notifier;
6591f871c5eSPeter Maydell 
6601f871c5eSPeter Maydell     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
6615601be3bSPeter Maydell         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
6621f871c5eSPeter Maydell         memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
6635601be3bSPeter Maydell         g_free(notifier);
6641f871c5eSPeter Maydell     }
6651f871c5eSPeter Maydell     g_array_free(cpu->iommu_notifiers, true);
6661f871c5eSPeter Maydell }
6671f871c5eSPeter Maydell 
668d9f24bf5SPaolo Bonzini void tcg_iommu_init_notifier_list(CPUState *cpu)
669d9f24bf5SPaolo Bonzini {
670d9f24bf5SPaolo Bonzini     cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
671d9f24bf5SPaolo Bonzini }
672d9f24bf5SPaolo Bonzini 
67379e2b9aeSPaolo Bonzini /* Called from RCU critical section */
67490260c6cSJan Kiszka MemoryRegionSection *
675418ade78SRichard Henderson address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
6761f871c5eSPeter Maydell                                   hwaddr *xlat, hwaddr *plen,
6771f871c5eSPeter Maydell                                   MemTxAttrs attrs, int *prot)
67890260c6cSJan Kiszka {
67930951157SAvi Kivity     MemoryRegionSection *section;
6801f871c5eSPeter Maydell     IOMMUMemoryRegion *iommu_mr;
6811f871c5eSPeter Maydell     IOMMUMemoryRegionClass *imrc;
6821f871c5eSPeter Maydell     IOMMUTLBEntry iotlb;
6831f871c5eSPeter Maydell     int iommu_idx;
684418ade78SRichard Henderson     hwaddr addr = orig_addr;
6850d58c660SRichard Henderson     AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
686d7898cdaSPeter Maydell 
6871f871c5eSPeter Maydell     for (;;) {
6881f871c5eSPeter Maydell         section = address_space_translate_internal(d, addr, &addr, plen, false);
6891f871c5eSPeter Maydell 
6901f871c5eSPeter Maydell         iommu_mr = memory_region_get_iommu(section->mr);
6911f871c5eSPeter Maydell         if (!iommu_mr) {
6921f871c5eSPeter Maydell             break;
6931f871c5eSPeter Maydell         }
6941f871c5eSPeter Maydell 
6951f871c5eSPeter Maydell         imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
6961f871c5eSPeter Maydell 
6971f871c5eSPeter Maydell         iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
6981f871c5eSPeter Maydell         tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
6991f871c5eSPeter Maydell         /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
7001f871c5eSPeter Maydell          * doesn't short-cut its translation table walk.
7011f871c5eSPeter Maydell          */
7021f871c5eSPeter Maydell         iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
7031f871c5eSPeter Maydell         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
7041f871c5eSPeter Maydell                 | (addr & iotlb.addr_mask));
7051f871c5eSPeter Maydell         /* Update the caller's prot bits to remove permissions the IOMMU
7061f871c5eSPeter Maydell          * is giving us a failure response for. If we get down to no
7071f871c5eSPeter Maydell          * permissions left at all we can give up now.
7081f871c5eSPeter Maydell          */
7091f871c5eSPeter Maydell         if (!(iotlb.perm & IOMMU_RO)) {
7101f871c5eSPeter Maydell             *prot &= ~(PAGE_READ | PAGE_EXEC);
7111f871c5eSPeter Maydell         }
7121f871c5eSPeter Maydell         if (!(iotlb.perm & IOMMU_WO)) {
7131f871c5eSPeter Maydell             *prot &= ~PAGE_WRITE;
7141f871c5eSPeter Maydell         }
7151f871c5eSPeter Maydell 
7161f871c5eSPeter Maydell         if (!*prot) {
7171f871c5eSPeter Maydell             goto translate_fail;
7181f871c5eSPeter Maydell         }
7191f871c5eSPeter Maydell 
7201f871c5eSPeter Maydell         d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
7211f871c5eSPeter Maydell     }
72230951157SAvi Kivity 
7233df9d748SAlexey Kardashevskiy     assert(!memory_region_is_iommu(section->mr));
7241f871c5eSPeter Maydell     *xlat = addr;
72530951157SAvi Kivity     return section;
7261f871c5eSPeter Maydell 
7271f871c5eSPeter Maydell translate_fail:
728418ade78SRichard Henderson     /*
729418ade78SRichard Henderson      * We should be given a page-aligned address -- certainly
730418ade78SRichard Henderson      * tlb_set_page_with_attrs() does so.  The page offset of xlat
731418ade78SRichard Henderson      * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
732418ade78SRichard Henderson      * The page portion of xlat will be logged by memory_region_access_valid()
733418ade78SRichard Henderson      * when this memory access is rejected, so use the original untranslated
734418ade78SRichard Henderson      * physical address.
735418ade78SRichard Henderson      */
736418ade78SRichard Henderson     assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
737418ade78SRichard Henderson     *xlat = orig_addr;
7381f871c5eSPeter Maydell     return &d->map.sections[PHYS_SECTION_UNASSIGNED];
73990260c6cSJan Kiszka }
7401a1562f5SAndreas Färber 
74180ceb07aSPeter Xu void cpu_address_space_init(CPUState *cpu, int asidx,
74280ceb07aSPeter Xu                             const char *prefix, MemoryRegion *mr)
74309daed84SEdgar E. Iglesias {
74412ebc9a7SPeter Maydell     CPUAddressSpace *newas;
74580ceb07aSPeter Xu     AddressSpace *as = g_new0(AddressSpace, 1);
74687a621d8SPeter Xu     char *as_name;
74780ceb07aSPeter Xu 
74880ceb07aSPeter Xu     assert(mr);
74987a621d8SPeter Xu     as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
75087a621d8SPeter Xu     address_space_init(as, mr, as_name);
75187a621d8SPeter Xu     g_free(as_name);
75212ebc9a7SPeter Maydell 
75312ebc9a7SPeter Maydell     /* Target code should have set num_ases before calling us */
75412ebc9a7SPeter Maydell     assert(asidx < cpu->num_ases);
75512ebc9a7SPeter Maydell 
75656943e8cSPeter Maydell     if (asidx == 0) {
75756943e8cSPeter Maydell         /* address space 0 gets the convenience alias */
75856943e8cSPeter Maydell         cpu->as = as;
75956943e8cSPeter Maydell     }
76056943e8cSPeter Maydell 
76112ebc9a7SPeter Maydell     /* KVM cannot currently support multiple address spaces. */
76212ebc9a7SPeter Maydell     assert(asidx == 0 || !kvm_enabled());
76309daed84SEdgar E. Iglesias 
76412ebc9a7SPeter Maydell     if (!cpu->cpu_ases) {
76512ebc9a7SPeter Maydell         cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
766*24bec42fSSalil Mehta         cpu->cpu_ases_count = cpu->num_ases;
76709daed84SEdgar E. Iglesias     }
76832857f4dSPeter Maydell 
76912ebc9a7SPeter Maydell     newas = &cpu->cpu_ases[asidx];
77012ebc9a7SPeter Maydell     newas->cpu = cpu;
77112ebc9a7SPeter Maydell     newas->as = as;
77256943e8cSPeter Maydell     if (tcg_enabled()) {
7739458a9a1SPaolo Bonzini         newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
77412ebc9a7SPeter Maydell         newas->tcg_as_listener.commit = tcg_commit;
775142518bdSPeter Xu         newas->tcg_as_listener.name = "tcg";
77612ebc9a7SPeter Maydell         memory_listener_register(&newas->tcg_as_listener, as);
77709daed84SEdgar E. Iglesias     }
77856943e8cSPeter Maydell }
779651a5bc0SPeter Maydell 
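/*
 * Illustrative usage sketch (assumption, not from this file): a target's CPU
 * realize code sets num_ases before calling cpu_address_space_init(), e.g.
 * to give a TCG CPU separate secure and non-secure views of memory.  The
 * helper and region names below are hypothetical.
 */
static void example_init_cpu_address_spaces(CPUState *cs,
                                            MemoryRegion *ns_mem,
                                            MemoryRegion *s_mem)
{
    cs->num_ases = 2;                        /* must be set before the calls */
    cpu_address_space_init(cs, 0, "cpu-memory", ns_mem);
    cpu_address_space_init(cs, 1, "cpu-secure-memory", s_mem);
}
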
780*24bec42fSSalil Mehta void cpu_address_space_destroy(CPUState *cpu, int asidx)
781*24bec42fSSalil Mehta {
782*24bec42fSSalil Mehta     CPUAddressSpace *cpuas;
783*24bec42fSSalil Mehta 
784*24bec42fSSalil Mehta     assert(cpu->cpu_ases);
785*24bec42fSSalil Mehta     assert(asidx >= 0 && asidx < cpu->num_ases);
786*24bec42fSSalil Mehta     /* KVM cannot currently support multiple address spaces. */
787*24bec42fSSalil Mehta     assert(asidx == 0 || !kvm_enabled());
788*24bec42fSSalil Mehta 
789*24bec42fSSalil Mehta     cpuas = &cpu->cpu_ases[asidx];
790*24bec42fSSalil Mehta     if (tcg_enabled()) {
791*24bec42fSSalil Mehta         memory_listener_unregister(&cpuas->tcg_as_listener);
792*24bec42fSSalil Mehta     }
793*24bec42fSSalil Mehta 
794*24bec42fSSalil Mehta     address_space_destroy(cpuas->as);
795*24bec42fSSalil Mehta     g_free_rcu(cpuas->as, rcu);
796*24bec42fSSalil Mehta 
797*24bec42fSSalil Mehta     if (asidx == 0) {
798*24bec42fSSalil Mehta         /* reset the convenience alias for address space 0 */
799*24bec42fSSalil Mehta         cpu->as = NULL;
800*24bec42fSSalil Mehta     }
801*24bec42fSSalil Mehta 
802*24bec42fSSalil Mehta     if (--cpu->cpu_ases_count == 0) {
803*24bec42fSSalil Mehta         g_free(cpu->cpu_ases);
804*24bec42fSSalil Mehta         cpu->cpu_ases = NULL;
805*24bec42fSSalil Mehta     }
806*24bec42fSSalil Mehta }
807*24bec42fSSalil Mehta 
808651a5bc0SPeter Maydell AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
809651a5bc0SPeter Maydell {
810651a5bc0SPeter Maydell     /* Return the AddressSpace corresponding to the specified index */
811651a5bc0SPeter Maydell     return cpu->cpu_ases[asidx].as;
812651a5bc0SPeter Maydell }
81309daed84SEdgar E. Iglesias 
8140dc3f44aSMike Day /* Called from RCU critical section */
815041603feSPaolo Bonzini static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
816041603feSPaolo Bonzini {
817041603feSPaolo Bonzini     RAMBlock *block;
818041603feSPaolo Bonzini 
819d73415a3SStefan Hajnoczi     block = qatomic_rcu_read(&ram_list.mru_block);
8209b8424d5SMichael S. Tsirkin     if (block && addr - block->offset < block->max_length) {
82168851b98SPaolo Bonzini         return block;
822041603feSPaolo Bonzini     }
82399e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
8249b8424d5SMichael S. Tsirkin         if (addr - block->offset < block->max_length) {
825041603feSPaolo Bonzini             goto found;
826041603feSPaolo Bonzini         }
827041603feSPaolo Bonzini     }
828041603feSPaolo Bonzini 
829041603feSPaolo Bonzini     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
830041603feSPaolo Bonzini     abort();
831041603feSPaolo Bonzini 
832041603feSPaolo Bonzini found:
833a4a411fbSStefan Hajnoczi     /* It is safe to write mru_block outside the BQL.  This
83443771539SPaolo Bonzini      * is what happens:
83543771539SPaolo Bonzini      *
83643771539SPaolo Bonzini      *     mru_block = xxx
83743771539SPaolo Bonzini      *     rcu_read_unlock()
83843771539SPaolo Bonzini      *                                        xxx removed from list
83943771539SPaolo Bonzini      *                  rcu_read_lock()
84043771539SPaolo Bonzini      *                  read mru_block
84143771539SPaolo Bonzini      *                                        mru_block = NULL;
84243771539SPaolo Bonzini      *                                        call_rcu(reclaim_ramblock, xxx);
84343771539SPaolo Bonzini      *                  rcu_read_unlock()
84443771539SPaolo Bonzini      *
845d73415a3SStefan Hajnoczi      * qatomic_rcu_set is not needed here.  The block was already published
84643771539SPaolo Bonzini      * when it was placed into the list.  Here we're just making an extra
84743771539SPaolo Bonzini      * copy of the pointer.
84843771539SPaolo Bonzini      */
849041603feSPaolo Bonzini     ram_list.mru_block = block;
850041603feSPaolo Bonzini     return block;
851041603feSPaolo Bonzini }
852041603feSPaolo Bonzini 
8537e8ccf99SPhilippe Mathieu-Daudé void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
8541ccde1cbSbellard {
8559a13565dSPeter Crosthwaite     CPUState *cpu;
856041603feSPaolo Bonzini     ram_addr_t start1;
857a2f4d5beSJuan Quintela     RAMBlock *block;
858a2f4d5beSJuan Quintela     ram_addr_t end;
859a2f4d5beSJuan Quintela 
860f28d0dfdSEmilio G. Cota     assert(tcg_enabled());
861a2f4d5beSJuan Quintela     end = TARGET_PAGE_ALIGN(start + length);
862a2f4d5beSJuan Quintela     start &= TARGET_PAGE_MASK;
863f23db169Sbellard 
864694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
865041603feSPaolo Bonzini     block = qemu_get_ram_block(start);
866041603feSPaolo Bonzini     assert(block == qemu_get_ram_block(end - 1));
8671240be24SMichael S. Tsirkin     start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
8689a13565dSPeter Crosthwaite     CPU_FOREACH(cpu) {
8699a13565dSPeter Crosthwaite         tlb_reset_dirty(cpu, start1, length);
8709a13565dSPeter Crosthwaite     }
871d24981d3SJuan Quintela }
872d24981d3SJuan Quintela 
873d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
87403eebc9eSStefan Hajnoczi bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
87503eebc9eSStefan Hajnoczi                                               ram_addr_t length,
87652159192SJuan Quintela                                               unsigned client)
877d24981d3SJuan Quintela {
8785b82b703SStefan Hajnoczi     DirtyMemoryBlocks *blocks;
87925aa6b37SMatt Borgerson     unsigned long end, page, start_page;
8805b82b703SStefan Hajnoczi     bool dirty = false;
881077874e0SPeter Xu     RAMBlock *ramblock;
882077874e0SPeter Xu     uint64_t mr_offset, mr_size;
883d24981d3SJuan Quintela 
88403eebc9eSStefan Hajnoczi     if (length == 0) {
88503eebc9eSStefan Hajnoczi         return false;
88603eebc9eSStefan Hajnoczi     }
88703eebc9eSStefan Hajnoczi 
88803eebc9eSStefan Hajnoczi     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
88925aa6b37SMatt Borgerson     start_page = start >> TARGET_PAGE_BITS;
89025aa6b37SMatt Borgerson     page = start_page;
8915b82b703SStefan Hajnoczi 
892694ea274SDr. David Alan Gilbert     WITH_RCU_READ_LOCK_GUARD() {
893d73415a3SStefan Hajnoczi         blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
894077874e0SPeter Xu         ramblock = qemu_get_ram_block(start);
895077874e0SPeter Xu         /* Range sanity check on the ramblock */
896077874e0SPeter Xu         assert(start >= ramblock->offset &&
897077874e0SPeter Xu                start + length <= ramblock->offset + ramblock->used_length);
8985b82b703SStefan Hajnoczi 
8995b82b703SStefan Hajnoczi         while (page < end) {
9005b82b703SStefan Hajnoczi             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
9015b82b703SStefan Hajnoczi             unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
902694ea274SDr. David Alan Gilbert             unsigned long num = MIN(end - page,
903694ea274SDr. David Alan Gilbert                                     DIRTY_MEMORY_BLOCK_SIZE - offset);
9045b82b703SStefan Hajnoczi 
9055b82b703SStefan Hajnoczi             dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
9065b82b703SStefan Hajnoczi                                                   offset, num);
9075b82b703SStefan Hajnoczi             page += num;
9085b82b703SStefan Hajnoczi         }
9095b82b703SStefan Hajnoczi 
91025aa6b37SMatt Borgerson         mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
91125aa6b37SMatt Borgerson         mr_size = (end - start_page) << TARGET_PAGE_BITS;
912077874e0SPeter Xu         memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
913694ea274SDr. David Alan Gilbert     }
91403eebc9eSStefan Hajnoczi 
91586a9ae80SNicholas Piggin     if (dirty) {
91686a9ae80SNicholas Piggin         cpu_physical_memory_dirty_bits_cleared(start, length);
917d24981d3SJuan Quintela     }
91803eebc9eSStefan Hajnoczi 
91903eebc9eSStefan Hajnoczi     return dirty;
9201ccde1cbSbellard }
9211ccde1cbSbellard 
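/*
 * Illustrative sketch (assumption, not from this file): the
 * memory_region_test_and_clear_dirty() wrapper in memory.c is essentially a
 * thin layer over the helper above, turning a MemoryRegion-relative offset
 * into a ram_addr_t before clearing and reporting the dirty state.
 */
static bool example_mr_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                            hwaddr size, unsigned client)
{
    return cpu_physical_memory_test_and_clear_dirty(
               memory_region_get_ram_addr(mr) + addr, size, client);
}
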
9228deaf12cSGerd Hoffmann DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
9235dea4079SPeter Xu     (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
9248deaf12cSGerd Hoffmann {
9258deaf12cSGerd Hoffmann     DirtyMemoryBlocks *blocks;
9265dea4079SPeter Xu     ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
9278deaf12cSGerd Hoffmann     unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
9288deaf12cSGerd Hoffmann     ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
9298deaf12cSGerd Hoffmann     ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
9308deaf12cSGerd Hoffmann     DirtyBitmapSnapshot *snap;
9318deaf12cSGerd Hoffmann     unsigned long page, end, dest;
9328deaf12cSGerd Hoffmann 
9338deaf12cSGerd Hoffmann     snap = g_malloc0(sizeof(*snap) +
9348deaf12cSGerd Hoffmann                      ((last - first) >> (TARGET_PAGE_BITS + 3)));
9358deaf12cSGerd Hoffmann     snap->start = first;
9368deaf12cSGerd Hoffmann     snap->end   = last;
9378deaf12cSGerd Hoffmann 
9388deaf12cSGerd Hoffmann     page = first >> TARGET_PAGE_BITS;
9398deaf12cSGerd Hoffmann     end  = last  >> TARGET_PAGE_BITS;
9408deaf12cSGerd Hoffmann     dest = 0;
9418deaf12cSGerd Hoffmann 
942694ea274SDr. David Alan Gilbert     WITH_RCU_READ_LOCK_GUARD() {
943d73415a3SStefan Hajnoczi         blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
9448deaf12cSGerd Hoffmann 
9458deaf12cSGerd Hoffmann         while (page < end) {
9468deaf12cSGerd Hoffmann             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
9476ba9b60aSPhilippe Mathieu-Daudé             unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
948694ea274SDr. David Alan Gilbert             unsigned long num = MIN(end - page,
9496ba9b60aSPhilippe Mathieu-Daudé                                     DIRTY_MEMORY_BLOCK_SIZE - ofs);
9508deaf12cSGerd Hoffmann 
9516ba9b60aSPhilippe Mathieu-Daudé             assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
9528deaf12cSGerd Hoffmann             assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
9536ba9b60aSPhilippe Mathieu-Daudé             ofs >>= BITS_PER_LEVEL;
9548deaf12cSGerd Hoffmann 
9558deaf12cSGerd Hoffmann             bitmap_copy_and_clear_atomic(snap->dirty + dest,
9566ba9b60aSPhilippe Mathieu-Daudé                                          blocks->blocks[idx] + ofs,
9578deaf12cSGerd Hoffmann                                          num);
9588deaf12cSGerd Hoffmann             page += num;
9598deaf12cSGerd Hoffmann             dest += num >> BITS_PER_LEVEL;
9608deaf12cSGerd Hoffmann         }
961694ea274SDr. David Alan Gilbert     }
9628deaf12cSGerd Hoffmann 
96386a9ae80SNicholas Piggin     cpu_physical_memory_dirty_bits_cleared(start, length);
9648deaf12cSGerd Hoffmann 
965077874e0SPeter Xu     memory_region_clear_dirty_bitmap(mr, offset, length);
966077874e0SPeter Xu 
9678deaf12cSGerd Hoffmann     return snap;
9688deaf12cSGerd Hoffmann }
9698deaf12cSGerd Hoffmann 
9708deaf12cSGerd Hoffmann bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
9718deaf12cSGerd Hoffmann                                             ram_addr_t start,
9728deaf12cSGerd Hoffmann                                             ram_addr_t length)
9738deaf12cSGerd Hoffmann {
9748deaf12cSGerd Hoffmann     unsigned long page, end;
9758deaf12cSGerd Hoffmann 
9768deaf12cSGerd Hoffmann     assert(start >= snap->start);
9778deaf12cSGerd Hoffmann     assert(start + length <= snap->end);
9788deaf12cSGerd Hoffmann 
9798deaf12cSGerd Hoffmann     end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
9808deaf12cSGerd Hoffmann     page = (start - snap->start) >> TARGET_PAGE_BITS;
9818deaf12cSGerd Hoffmann 
9828deaf12cSGerd Hoffmann     while (page < end) {
9838deaf12cSGerd Hoffmann         if (test_bit(page, snap->dirty)) {
9848deaf12cSGerd Hoffmann             return true;
9858deaf12cSGerd Hoffmann         }
9868deaf12cSGerd Hoffmann         page++;
9878deaf12cSGerd Hoffmann     }
9888deaf12cSGerd Hoffmann     return false;
9898deaf12cSGerd Hoffmann }
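/*
 * Illustrative sketch (not part of the original file): callers normally reach
 * the snapshot pair above through the MemoryRegion wrappers, e.g. a display
 * device scanning VRAM for pages touched since the last refresh.  The names
 * "vram_mr", "vram_size", "stride" and "redraw_chunk" are assumptions made up
 * for this example.
 *
 *     DirtyBitmapSnapshot *snap;
 *     hwaddr off;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram_mr, 0, vram_size,
 *                                                    DIRTY_MEMORY_VGA);
 *     for (off = 0; off < vram_size; off += stride) {
 *         if (memory_region_snapshot_get_dirty(vram_mr, snap, off, stride)) {
 *             redraw_chunk(off, stride);
 *         }
 *     }
 *     g_free(snap);
 */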
9908deaf12cSGerd Hoffmann 
99179e2b9aeSPaolo Bonzini /* Called from RCU critical section */
992bb0e627aSAndreas Färber hwaddr memory_region_section_get_iotlb(CPUState *cpu,
9938f5db641SRichard Henderson                                        MemoryRegionSection *section)
994e5548617SBlue Swirl {
9958f5db641SRichard Henderson     AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
9968f5db641SRichard Henderson     return section - d->map.sections;
997e5548617SBlue Swirl }
9988da3ff18Spbrook 
999c227f099SAnthony Liguori static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
10005312bd8bSAvi Kivity                             uint16_t section);
100116620684SAlexey Kardashevskiy static subpage_t *subpage_init(FlatView *fv, hwaddr base);
100254688b1eSAvi Kivity 
100353cb28cbSMarcel Apfelbaum static uint16_t phys_section_add(PhysPageMap *map,
100453cb28cbSMarcel Apfelbaum                                  MemoryRegionSection *section)
10055312bd8bSAvi Kivity {
100668f3f65bSPaolo Bonzini     /* The physical section number is ORed with a page-aligned
100768f3f65bSPaolo Bonzini      * pointer to produce the iotlb entries.  Thus it should
100868f3f65bSPaolo Bonzini      * never overflow into the page-aligned value.
100968f3f65bSPaolo Bonzini      */
101053cb28cbSMarcel Apfelbaum     assert(map->sections_nb < TARGET_PAGE_SIZE);
101168f3f65bSPaolo Bonzini 
101253cb28cbSMarcel Apfelbaum     if (map->sections_nb == map->sections_nb_alloc) {
101353cb28cbSMarcel Apfelbaum         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
101453cb28cbSMarcel Apfelbaum         map->sections = g_renew(MemoryRegionSection, map->sections,
101553cb28cbSMarcel Apfelbaum                                 map->sections_nb_alloc);
10165312bd8bSAvi Kivity     }
101753cb28cbSMarcel Apfelbaum     map->sections[map->sections_nb] = *section;
1018dfde4e6eSPaolo Bonzini     memory_region_ref(section->mr);
101953cb28cbSMarcel Apfelbaum     return map->sections_nb++;
10205312bd8bSAvi Kivity }
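/*
 * Illustrative note (not part of the original file): per the comment above,
 * an iotlb entry combines a page-aligned value with the section index
 * returned here, roughly
 *
 *     entry = (addr & TARGET_PAGE_MASK) | section_index;
 *
 * which is why the assert keeps sections_nb below TARGET_PAGE_SIZE.
 */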
10215312bd8bSAvi Kivity 
1022058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
1023058bc4b5SPaolo Bonzini {
102455b4e80bSDon Slutz     bool have_sub_page = mr->subpage;
102555b4e80bSDon Slutz 
1026dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
1027dfde4e6eSPaolo Bonzini 
102855b4e80bSDon Slutz     if (have_sub_page) {
1029058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
1030b4fefef9SPeter Crosthwaite         object_unref(OBJECT(&subpage->iomem));
1031058bc4b5SPaolo Bonzini         g_free(subpage);
1032058bc4b5SPaolo Bonzini     }
1033058bc4b5SPaolo Bonzini }
1034058bc4b5SPaolo Bonzini 
10356092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
10365312bd8bSAvi Kivity {
10379affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
10389affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1039058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1040058bc4b5SPaolo Bonzini     }
10419affd6fcSPaolo Bonzini     g_free(map->sections);
10429affd6fcSPaolo Bonzini     g_free(map->nodes);
10435312bd8bSAvi Kivity }
10445312bd8bSAvi Kivity 
10459950322aSAlexey Kardashevskiy static void register_subpage(FlatView *fv, MemoryRegionSection *section)
10460f0cb164SAvi Kivity {
10479950322aSAlexey Kardashevskiy     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
10480f0cb164SAvi Kivity     subpage_t *subpage;
1049a8170e5eSAvi Kivity     hwaddr base = section->offset_within_address_space
10500f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
1051003a0cf2SPeter Xu     MemoryRegionSection *existing = phys_page_find(d, base);
10520f0cb164SAvi Kivity     MemoryRegionSection subsection = {
10530f0cb164SAvi Kivity         .offset_within_address_space = base,
1054052e87b0SPaolo Bonzini         .size = int128_make64(TARGET_PAGE_SIZE),
10550f0cb164SAvi Kivity     };
1056a8170e5eSAvi Kivity     hwaddr start, end;
10570f0cb164SAvi Kivity 
1058f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
10590f0cb164SAvi Kivity 
1060f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
106116620684SAlexey Kardashevskiy         subpage = subpage_init(fv, base);
106216620684SAlexey Kardashevskiy         subsection.fv = fv;
10630f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
1064ac1970fbSAvi Kivity         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
106553cb28cbSMarcel Apfelbaum                       phys_section_add(&d->map, &subsection));
10660f0cb164SAvi Kivity     } else {
1067f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
10680f0cb164SAvi Kivity     }
10690f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1070052e87b0SPaolo Bonzini     end = start + int128_get64(section->size) - 1;
107153cb28cbSMarcel Apfelbaum     subpage_register(subpage, start, end,
107253cb28cbSMarcel Apfelbaum                      phys_section_add(&d->map, section));
10730f0cb164SAvi Kivity }
10740f0cb164SAvi Kivity 
10750f0cb164SAvi Kivity 
10769950322aSAlexey Kardashevskiy static void register_multipage(FlatView *fv,
1077052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
107833417e70Sbellard {
10799950322aSAlexey Kardashevskiy     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1080a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
108153cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
1082052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1083052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
1084dd81124bSAvi Kivity 
1085733d5ef5SPaolo Bonzini     assert(num_pages);
1086733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
108733417e70Sbellard }
108833417e70Sbellard 
1089494d1997SWei Yang /*
1090494d1997SWei Yang  * The range in *section* may look like this:
1091494d1997SWei Yang  *
1092494d1997SWei Yang  *      |s|PPPPPPP|s|
1093494d1997SWei Yang  *
1094494d1997SWei Yang  * where s stands for subpage and P for page.
1095494d1997SWei Yang  */
10968629d3fcSAlexey Kardashevskiy void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
10970f0cb164SAvi Kivity {
1098494d1997SWei Yang     MemoryRegionSection remain = *section;
1099052e87b0SPaolo Bonzini     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
11000f0cb164SAvi Kivity 
1101494d1997SWei Yang     /* register first subpage */
1102494d1997SWei Yang     if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1103494d1997SWei Yang         uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
1104494d1997SWei Yang                         - remain.offset_within_address_space;
1105733d5ef5SPaolo Bonzini 
1106494d1997SWei Yang         MemoryRegionSection now = remain;
1107052e87b0SPaolo Bonzini         now.size = int128_min(int128_make64(left), now.size);
11089950322aSAlexey Kardashevskiy         register_subpage(fv, &now);
1109494d1997SWei Yang         if (int128_eq(remain.size, now.size)) {
1110494d1997SWei Yang             return;
1111733d5ef5SPaolo Bonzini         }
1112052e87b0SPaolo Bonzini         remain.size = int128_sub(remain.size, now.size);
1113052e87b0SPaolo Bonzini         remain.offset_within_address_space += int128_get64(now.size);
1114052e87b0SPaolo Bonzini         remain.offset_within_region += int128_get64(now.size);
1115494d1997SWei Yang     }
1116494d1997SWei Yang 
1117494d1997SWei Yang     /* register whole pages */
1118494d1997SWei Yang     if (int128_ge(remain.size, page_size)) {
1119494d1997SWei Yang         MemoryRegionSection now = remain;
1120052e87b0SPaolo Bonzini         now.size = int128_and(now.size, int128_neg(page_size));
11219950322aSAlexey Kardashevskiy         register_multipage(fv, &now);
1122494d1997SWei Yang         if (int128_eq(remain.size, now.size)) {
1123494d1997SWei Yang             return;
112469b67646STyler Hall         }
1125494d1997SWei Yang         remain.size = int128_sub(remain.size, now.size);
1126494d1997SWei Yang         remain.offset_within_address_space += int128_get64(now.size);
1127494d1997SWei Yang         remain.offset_within_region += int128_get64(now.size);
11280f0cb164SAvi Kivity     }
1129494d1997SWei Yang 
1130494d1997SWei Yang     /* register last subpage */
1131494d1997SWei Yang     register_subpage(fv, &remain);
11320f0cb164SAvi Kivity }
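/*
 * Worked example (not part of the original file), assuming 4 KiB target
 * pages: a section covering [0x0800, 0x3400) is split by the code above into
 *
 *     register_subpage()   for [0x0800, 0x1000)   - unaligned head
 *     register_multipage() for [0x1000, 0x3000)   - whole pages
 *     register_subpage()   for [0x3000, 0x3400)   - unaligned tail
 *
 * matching the |s|PPPPPPP|s| picture in the comment before the function.
 */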
11330f0cb164SAvi Kivity 
113462a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
113562a2744cSSheng Yang {
113662a2744cSSheng Yang     if (kvm_enabled())
113762a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
113862a2744cSSheng Yang }
113962a2744cSSheng Yang 
1140b2a8658eSUmesh Deshpande void qemu_mutex_lock_ramlist(void)
1141b2a8658eSUmesh Deshpande {
1142b2a8658eSUmesh Deshpande     qemu_mutex_lock(&ram_list.mutex);
1143b2a8658eSUmesh Deshpande }
1144b2a8658eSUmesh Deshpande 
1145b2a8658eSUmesh Deshpande void qemu_mutex_unlock_ramlist(void)
1146b2a8658eSUmesh Deshpande {
1147b2a8658eSUmesh Deshpande     qemu_mutex_unlock(&ram_list.mutex);
1148b2a8658eSUmesh Deshpande }
1149b2a8658eSUmesh Deshpande 
1150ca411b7cSDaniel P. Berrangé GString *ram_block_format(void)
1151be9b23c4SPeter Xu {
1152be9b23c4SPeter Xu     RAMBlock *block;
1153be9b23c4SPeter Xu     char *psize;
1154ca411b7cSDaniel P. Berrangé     GString *buf = g_string_new("");
1155be9b23c4SPeter Xu 
1156694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
1157dbc6ae9cSTed Chen     g_string_append_printf(buf, "%24s %8s  %18s %18s %18s %18s %3s\n",
1158dbc6ae9cSTed Chen                            "Block Name", "PSize", "Offset", "Used", "Total",
1159dbc6ae9cSTed Chen                            "HVA", "RO");
1160dbc6ae9cSTed Chen 
1161be9b23c4SPeter Xu     RAMBLOCK_FOREACH(block) {
1162be9b23c4SPeter Xu         psize = size_to_str(block->page_size);
1163ca411b7cSDaniel P. Berrangé         g_string_append_printf(buf, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
1164dbc6ae9cSTed Chen                                " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n",
1165dbc6ae9cSTed Chen                                block->idstr, psize,
1166be9b23c4SPeter Xu                                (uint64_t)block->offset,
1167be9b23c4SPeter Xu                                (uint64_t)block->used_length,
1168dbc6ae9cSTed Chen                                (uint64_t)block->max_length,
1169dbc6ae9cSTed Chen                                (uint64_t)(uintptr_t)block->host,
1170dbc6ae9cSTed Chen                                block->mr->readonly ? "ro" : "rw");
1171dbc6ae9cSTed Chen 
1172be9b23c4SPeter Xu         g_free(psize);
1173be9b23c4SPeter Xu     }
1174ca411b7cSDaniel P. Berrangé 
1175ca411b7cSDaniel P. Berrangé     return buf;
1176be9b23c4SPeter Xu }
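/*
 * Illustrative sketch (not part of the original file): the string built above
 * backs the monitor's "info ramblock" output.  With the format used here a
 * row renders roughly as follows (the values are invented for the example):
 *
 *               Block Name    PSize              Offset                Used ...
 *                   pc.ram    4 KiB  0x0000000000000000  0x0000000080000000 ...
 */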
1177be9b23c4SPeter Xu 
1178905b7ee4SDavid Hildenbrand static int find_min_backend_pagesize(Object *obj, void *opaque)
11799c607668SAlexey Kardashevskiy {
11809c607668SAlexey Kardashevskiy     long *hpsize_min = opaque;
11819c607668SAlexey Kardashevskiy 
11829c607668SAlexey Kardashevskiy     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
11837d5489e6SDavid Gibson         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
11847d5489e6SDavid Gibson         long hpsize = host_memory_backend_pagesize(backend);
11852b108085SDavid Gibson 
11867d5489e6SDavid Gibson         if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
11879c607668SAlexey Kardashevskiy             *hpsize_min = hpsize;
11889c607668SAlexey Kardashevskiy         }
11899c607668SAlexey Kardashevskiy     }
11909c607668SAlexey Kardashevskiy 
11919c607668SAlexey Kardashevskiy     return 0;
11929c607668SAlexey Kardashevskiy }
11939c607668SAlexey Kardashevskiy 
1194905b7ee4SDavid Hildenbrand static int find_max_backend_pagesize(Object *obj, void *opaque)
1195905b7ee4SDavid Hildenbrand {
1196905b7ee4SDavid Hildenbrand     long *hpsize_max = opaque;
1197905b7ee4SDavid Hildenbrand 
1198905b7ee4SDavid Hildenbrand     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1199905b7ee4SDavid Hildenbrand         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1200905b7ee4SDavid Hildenbrand         long hpsize = host_memory_backend_pagesize(backend);
1201905b7ee4SDavid Hildenbrand 
1202905b7ee4SDavid Hildenbrand         if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
1203905b7ee4SDavid Hildenbrand             *hpsize_max = hpsize;
1204905b7ee4SDavid Hildenbrand         }
1205905b7ee4SDavid Hildenbrand     }
1206905b7ee4SDavid Hildenbrand 
1207905b7ee4SDavid Hildenbrand     return 0;
1208905b7ee4SDavid Hildenbrand }
1209905b7ee4SDavid Hildenbrand 
1210905b7ee4SDavid Hildenbrand /*
1211905b7ee4SDavid Hildenbrand  * TODO: We assume right now that all mapped host memory backends are
1212905b7ee4SDavid Hildenbrand  * used as RAM; however, some might be used for different purposes.
1213905b7ee4SDavid Hildenbrand  */
1214905b7ee4SDavid Hildenbrand long qemu_minrampagesize(void)
12159c607668SAlexey Kardashevskiy {
12169c607668SAlexey Kardashevskiy     long hpsize = LONG_MAX;
1217ad1172d8SIgor Mammedov     Object *memdev_root = object_resolve_path("/objects", NULL);
12189c607668SAlexey Kardashevskiy 
1219905b7ee4SDavid Hildenbrand     object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
12209c607668SAlexey Kardashevskiy     return hpsize;
12219c607668SAlexey Kardashevskiy }
1222905b7ee4SDavid Hildenbrand 
1223905b7ee4SDavid Hildenbrand long qemu_maxrampagesize(void)
1224905b7ee4SDavid Hildenbrand {
1225ad1172d8SIgor Mammedov     long pagesize = 0;
1226905b7ee4SDavid Hildenbrand     Object *memdev_root = object_resolve_path("/objects", NULL);
1227905b7ee4SDavid Hildenbrand 
1228ad1172d8SIgor Mammedov     object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
1229905b7ee4SDavid Hildenbrand     return pagesize;
1230905b7ee4SDavid Hildenbrand }
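/*
 * Illustrative sketch (not part of the original file): target code typically
 * uses these helpers to validate huge-page requirements; "required_pagesize"
 * below is an assumption made up for the example.
 *
 *     long psize = qemu_minrampagesize();
 *
 *     if (psize < required_pagesize) {
 *         error_setg(errp, "memory backend page size 0x%lx is too small",
 *                    psize);
 *     }
 */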
12319c607668SAlexey Kardashevskiy 
1232d5dbde46SHikaru Nishida #ifdef CONFIG_POSIX
1233d6af99c9SHaozhong Zhang static int64_t get_file_size(int fd)
1234d6af99c9SHaozhong Zhang {
123572d41eb4SStefan Hajnoczi     int64_t size;
123672d41eb4SStefan Hajnoczi #if defined(__linux__)
123772d41eb4SStefan Hajnoczi     struct stat st;
123872d41eb4SStefan Hajnoczi 
123972d41eb4SStefan Hajnoczi     if (fstat(fd, &st) < 0) {
124072d41eb4SStefan Hajnoczi         return -errno;
124172d41eb4SStefan Hajnoczi     }
124272d41eb4SStefan Hajnoczi 
124372d41eb4SStefan Hajnoczi     /* Special handling for devdax character devices */
124472d41eb4SStefan Hajnoczi     if (S_ISCHR(st.st_mode)) {
124572d41eb4SStefan Hajnoczi         g_autofree char *subsystem_path = NULL;
124672d41eb4SStefan Hajnoczi         g_autofree char *subsystem = NULL;
124772d41eb4SStefan Hajnoczi 
124872d41eb4SStefan Hajnoczi         subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
124972d41eb4SStefan Hajnoczi                                          major(st.st_rdev), minor(st.st_rdev));
125072d41eb4SStefan Hajnoczi         subsystem = g_file_read_link(subsystem_path, NULL);
125172d41eb4SStefan Hajnoczi 
125272d41eb4SStefan Hajnoczi         if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
125372d41eb4SStefan Hajnoczi             g_autofree char *size_path = NULL;
125472d41eb4SStefan Hajnoczi             g_autofree char *size_str = NULL;
125572d41eb4SStefan Hajnoczi 
125672d41eb4SStefan Hajnoczi             size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
125772d41eb4SStefan Hajnoczi                                     major(st.st_rdev), minor(st.st_rdev));
125872d41eb4SStefan Hajnoczi 
125972d41eb4SStefan Hajnoczi             if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
126072d41eb4SStefan Hajnoczi                 return g_ascii_strtoll(size_str, NULL, 0);
126172d41eb4SStefan Hajnoczi             }
126272d41eb4SStefan Hajnoczi         }
126372d41eb4SStefan Hajnoczi     }
126472d41eb4SStefan Hajnoczi #endif /* defined(__linux__) */
126572d41eb4SStefan Hajnoczi 
126672d41eb4SStefan Hajnoczi     /* st.st_size may be zero for special files yet lseek(2) works */
126772d41eb4SStefan Hajnoczi     size = lseek(fd, 0, SEEK_END);
1268d6af99c9SHaozhong Zhang     if (size < 0) {
1269d6af99c9SHaozhong Zhang         return -errno;
1270d6af99c9SHaozhong Zhang     }
1271d6af99c9SHaozhong Zhang     return size;
1272d6af99c9SHaozhong Zhang }
1273d6af99c9SHaozhong Zhang 
1274ce317be9SJingqi Liu static int64_t get_file_align(int fd)
1275ce317be9SJingqi Liu {
1276ce317be9SJingqi Liu     int64_t align = -1;
1277ce317be9SJingqi Liu #if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1278ce317be9SJingqi Liu     struct stat st;
1279ce317be9SJingqi Liu 
1280ce317be9SJingqi Liu     if (fstat(fd, &st) < 0) {
1281ce317be9SJingqi Liu         return -errno;
1282ce317be9SJingqi Liu     }
1283ce317be9SJingqi Liu 
1284ce317be9SJingqi Liu     /* Special handling for devdax character devices */
1285ce317be9SJingqi Liu     if (S_ISCHR(st.st_mode)) {
1286ce317be9SJingqi Liu         g_autofree char *path = NULL;
1287ce317be9SJingqi Liu         g_autofree char *rpath = NULL;
1288ce317be9SJingqi Liu         struct daxctl_ctx *ctx;
1289ce317be9SJingqi Liu         struct daxctl_region *region;
1290ce317be9SJingqi Liu         int rc = 0;
1291ce317be9SJingqi Liu 
1292ce317be9SJingqi Liu         path = g_strdup_printf("/sys/dev/char/%d:%d",
1293ce317be9SJingqi Liu                     major(st.st_rdev), minor(st.st_rdev));
1294ce317be9SJingqi Liu         rpath = realpath(path, NULL);
12958efdb7baSPeter Maydell         if (!rpath) {
12968efdb7baSPeter Maydell             return -errno;
12978efdb7baSPeter Maydell         }
1298ce317be9SJingqi Liu 
1299ce317be9SJingqi Liu         rc = daxctl_new(&ctx);
1300ce317be9SJingqi Liu         if (rc) {
1301ce317be9SJingqi Liu             return -1;
1302ce317be9SJingqi Liu         }
1303ce317be9SJingqi Liu 
1304ce317be9SJingqi Liu         daxctl_region_foreach(ctx, region) {
1305ce317be9SJingqi Liu             if (strstr(rpath, daxctl_region_get_path(region))) {
1306ce317be9SJingqi Liu                 align = daxctl_region_get_align(region);
1307ce317be9SJingqi Liu                 break;
1308ce317be9SJingqi Liu             }
1309ce317be9SJingqi Liu         }
1310ce317be9SJingqi Liu         daxctl_unref(ctx);
1311ce317be9SJingqi Liu     }
1312ce317be9SJingqi Liu #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
1313ce317be9SJingqi Liu 
1314ce317be9SJingqi Liu     return align;
1315ce317be9SJingqi Liu }
1316ce317be9SJingqi Liu 
13178d37b030SMarc-André Lureau static int file_ram_open(const char *path,
13188d37b030SMarc-André Lureau                          const char *region_name,
1319369d6dc4SStefan Hajnoczi                          bool readonly,
13204d6b23f7SDavid Hildenbrand                          bool *created)
1321c902760fSMarcelo Tosatti {
1322c902760fSMarcelo Tosatti     char *filename;
13238ca761f6SPeter Feiner     char *sanitized_name;
13248ca761f6SPeter Feiner     char *c;
13255c3ece79SPaolo Bonzini     int fd = -1;
1326c902760fSMarcelo Tosatti 
13278d37b030SMarc-André Lureau     *created = false;
1328fd97fd44SMarkus Armbruster     for (;;) {
1329369d6dc4SStefan Hajnoczi         fd = open(path, readonly ? O_RDONLY : O_RDWR);
1330fd97fd44SMarkus Armbruster         if (fd >= 0) {
1331ca01f1b8SDavid Hildenbrand             /*
1332ca01f1b8SDavid Hildenbrand              * open(O_RDONLY) won't fail with EISDIR. Check manually if we
1333ca01f1b8SDavid Hildenbrand              * opened a directory and fail similarly to how we fail ENOENT
1334ca01f1b8SDavid Hildenbrand              * in readonly mode. Note that mkstemp() would imply O_RDWR.
1335ca01f1b8SDavid Hildenbrand              */
1336ca01f1b8SDavid Hildenbrand             if (readonly) {
1337ca01f1b8SDavid Hildenbrand                 struct stat file_stat;
1338ca01f1b8SDavid Hildenbrand 
1339ca01f1b8SDavid Hildenbrand                 if (fstat(fd, &file_stat)) {
1340ca01f1b8SDavid Hildenbrand                     close(fd);
1341ca01f1b8SDavid Hildenbrand                     if (errno == EINTR) {
1342ca01f1b8SDavid Hildenbrand                         continue;
1343ca01f1b8SDavid Hildenbrand                     }
1344ca01f1b8SDavid Hildenbrand                     return -errno;
1345ca01f1b8SDavid Hildenbrand                 } else if (S_ISDIR(file_stat.st_mode)) {
1346ca01f1b8SDavid Hildenbrand                     close(fd);
1347ca01f1b8SDavid Hildenbrand                     return -EISDIR;
1348ca01f1b8SDavid Hildenbrand                 }
1349ca01f1b8SDavid Hildenbrand             }
1350fd97fd44SMarkus Armbruster             /* @path names an existing file, use it */
1351fd97fd44SMarkus Armbruster             break;
1352fd97fd44SMarkus Armbruster         }
1353fd97fd44SMarkus Armbruster         if (errno == ENOENT) {
13544d6b23f7SDavid Hildenbrand             if (readonly) {
13554d6b23f7SDavid Hildenbrand                 /* Refuse to create new, readonly files. */
13564d6b23f7SDavid Hildenbrand                 return -ENOENT;
13574d6b23f7SDavid Hildenbrand             }
1358fd97fd44SMarkus Armbruster             /* @path names a file that doesn't exist, create it */
1359fd97fd44SMarkus Armbruster             fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1360fd97fd44SMarkus Armbruster             if (fd >= 0) {
13618d37b030SMarc-André Lureau                 *created = true;
1362fd97fd44SMarkus Armbruster                 break;
1363fd97fd44SMarkus Armbruster             }
1364fd97fd44SMarkus Armbruster         } else if (errno == EISDIR) {
1365fd97fd44SMarkus Armbruster             /* @path names a directory, create a file there */
13668ca761f6SPeter Feiner             /* Make name safe to use with mkstemp by replacing '/' with '_'. */
13678d37b030SMarc-André Lureau             sanitized_name = g_strdup(region_name);
13688ca761f6SPeter Feiner             for (c = sanitized_name; *c != '\0'; c++) {
13698d31d6b6SPavel Fedin                 if (*c == '/') {
13708ca761f6SPeter Feiner                     *c = '_';
13718ca761f6SPeter Feiner                 }
13728d31d6b6SPavel Fedin             }
13738ca761f6SPeter Feiner 
13748ca761f6SPeter Feiner             filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
13758ca761f6SPeter Feiner                                        sanitized_name);
13768ca761f6SPeter Feiner             g_free(sanitized_name);
1377c902760fSMarcelo Tosatti 
1378c902760fSMarcelo Tosatti             fd = mkstemp(filename);
13798d31d6b6SPavel Fedin             if (fd >= 0) {
13808d31d6b6SPavel Fedin                 unlink(filename);
1381fd97fd44SMarkus Armbruster                 g_free(filename);
1382fd97fd44SMarkus Armbruster                 break;
13838d31d6b6SPavel Fedin             }
13848d31d6b6SPavel Fedin             g_free(filename);
1385fd97fd44SMarkus Armbruster         }
1386fd97fd44SMarkus Armbruster         if (errno != EEXIST && errno != EINTR) {
13874d6b23f7SDavid Hildenbrand             return -errno;
1388fd97fd44SMarkus Armbruster         }
1389fd97fd44SMarkus Armbruster         /*
1390fd97fd44SMarkus Armbruster          * Try again on EINTR and EEXIST.  The latter happens when
1391fd97fd44SMarkus Armbruster          * something else creates the file between our two open().
1392fd97fd44SMarkus Armbruster          */
13938d31d6b6SPavel Fedin     }
13948d31d6b6SPavel Fedin 
13958d37b030SMarc-André Lureau     return fd;
13968d37b030SMarc-André Lureau }
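/*
 * Illustrative sketch (not part of the original file): a caller such as
 * qemu_ram_alloc_from_file() pairs the open and mmap helpers, roughly as
 * follows (error handling elided; the "truncate" decision depends on whether
 * the file already holds data, per the comment inside file_ram_alloc() below):
 *
 *     bool created;
 *     int fd = file_ram_open(mem_path, memory_region_name(mr),
 *                            readonly, &created);
 *     if (fd < 0) {
 *         ... report -fd as an errno value ...
 *     }
 *     block->host = file_ram_alloc(block, size, fd, truncate, offset, errp);
 */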
13978d37b030SMarc-André Lureau 
13988d37b030SMarc-André Lureau static void *file_ram_alloc(RAMBlock *block,
13998d37b030SMarc-André Lureau                             ram_addr_t memory,
14008d37b030SMarc-André Lureau                             int fd,
14018d37b030SMarc-André Lureau                             bool truncate,
140244a4ff31SJagannathan Raman                             off_t offset,
14038d37b030SMarc-André Lureau                             Error **errp)
14048d37b030SMarc-André Lureau {
1405b444f5c0SDavid Hildenbrand     uint32_t qemu_map_flags;
14068d37b030SMarc-André Lureau     void *area;
14078d37b030SMarc-André Lureau 
1408863e9621SDr. David Alan Gilbert     block->page_size = qemu_fd_getpagesize(fd);
140998376843SHaozhong Zhang     if (block->mr->align % block->page_size) {
141098376843SHaozhong Zhang         error_setg(errp, "alignment 0x%" PRIx64
141198376843SHaozhong Zhang                    " must be a multiple of page size 0x%zx",
141298376843SHaozhong Zhang                    block->mr->align, block->page_size);
141398376843SHaozhong Zhang         return NULL;
141461362b71SDavid Hildenbrand     } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
141561362b71SDavid Hildenbrand         error_setg(errp, "alignment 0x%" PRIx64
141661362b71SDavid Hildenbrand                    " must be a power of two", block->mr->align);
141761362b71SDavid Hildenbrand         return NULL;
14184b870dc4SAlexander Graf     } else if (offset % block->page_size) {
14194b870dc4SAlexander Graf         error_setg(errp, "offset 0x%" PRIx64
14204b870dc4SAlexander Graf                    " must be a multiple of page size 0x%zx",
14214b870dc4SAlexander Graf                    offset, block->page_size);
14224b870dc4SAlexander Graf         return NULL;
142398376843SHaozhong Zhang     }
142498376843SHaozhong Zhang     block->mr->align = MAX(block->page_size, block->mr->align);
14258360668eSHaozhong Zhang #if defined(__s390x__)
14268360668eSHaozhong Zhang     if (kvm_enabled()) {
14278360668eSHaozhong Zhang         block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
14288360668eSHaozhong Zhang     }
14298360668eSHaozhong Zhang #endif
1430fd97fd44SMarkus Armbruster 
1431863e9621SDr. David Alan Gilbert     if (memory < block->page_size) {
1432fd97fd44SMarkus Armbruster         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1433863e9621SDr. David Alan Gilbert                    "or larger than page size 0x%zx",
1434863e9621SDr. David Alan Gilbert                    memory, block->page_size);
14358d37b030SMarc-André Lureau         return NULL;
14361775f111SHaozhong Zhang     }
14371775f111SHaozhong Zhang 
1438863e9621SDr. David Alan Gilbert     memory = ROUND_UP(memory, block->page_size);
1439c902760fSMarcelo Tosatti 
1440c902760fSMarcelo Tosatti     /*
1441c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
1442c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1443c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1444c902760fSMarcelo Tosatti      * mmap will fail.
1445d6af99c9SHaozhong Zhang      *
1446d6af99c9SHaozhong Zhang      * Do not truncate the non-empty backend file to avoid corrupting
1447d6af99c9SHaozhong Zhang      * the existing data in the file. Disabling shrinking is not
1448d6af99c9SHaozhong Zhang      * enough. For example, the current vNVDIMM implementation stores
1449d6af99c9SHaozhong Zhang      * the guest NVDIMM labels at the end of the backend file. If the
1450d6af99c9SHaozhong Zhang      * backend file is later extended, QEMU will not be able to find
1451d6af99c9SHaozhong Zhang      * those labels. Therefore, extending the non-empty backend file
1452d6af99c9SHaozhong Zhang      * is disabled as well.
1453c902760fSMarcelo Tosatti      */
14544b870dc4SAlexander Graf     if (truncate && ftruncate(fd, offset + memory)) {
1455c902760fSMarcelo Tosatti         perror("ftruncate");
14567f56e740SPaolo Bonzini     }
1457c902760fSMarcelo Tosatti 
14585c52a219SDavid Hildenbrand     qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
1459b444f5c0SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
1460b444f5c0SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
14618dbe22c6SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
1462b444f5c0SDavid Hildenbrand     area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
1463c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
14647f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
1465fd97fd44SMarkus Armbruster                          "unable to map backing store for guest RAM");
14668d37b030SMarc-André Lureau         return NULL;
1467c902760fSMarcelo Tosatti     }
1468ef36fa14SMarcelo Tosatti 
146904b16653SAlex Williamson     block->fd = fd;
14704b870dc4SAlexander Graf     block->fd_offset = offset;
1471c902760fSMarcelo Tosatti     return area;
1472c902760fSMarcelo Tosatti }
1473c902760fSMarcelo Tosatti #endif
1474c902760fSMarcelo Tosatti 
1475154cc9eaSDr. David Alan Gilbert /* Allocate space within the ram_addr_t space that governs the
1476154cc9eaSDr. David Alan Gilbert  * dirty bitmaps.
1477154cc9eaSDr. David Alan Gilbert  * Called with the ramlist lock held.
1478154cc9eaSDr. David Alan Gilbert  */
1479d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1480d17b5288SAlex Williamson {
148104b16653SAlex Williamson     RAMBlock *block, *next_block;
14823e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
148304b16653SAlex Williamson 
148449cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
148549cd9ac6SStefan Hajnoczi 
14860dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
148704b16653SAlex Williamson         return 0;
14880d53d9feSMike Day     }
148904b16653SAlex Williamson 
149099e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
1491154cc9eaSDr. David Alan Gilbert         ram_addr_t candidate, next = RAM_ADDR_MAX;
149204b16653SAlex Williamson 
1493801110abSDr. David Alan Gilbert         /* Align blocks to start on a 'long' in the bitmap
1494801110abSDr. David Alan Gilbert          * which makes the bitmap sync'ing take the fast path.
1495801110abSDr. David Alan Gilbert          */
1496154cc9eaSDr. David Alan Gilbert         candidate = block->offset + block->max_length;
1497801110abSDr. David Alan Gilbert         candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
149804b16653SAlex Williamson 
1499154cc9eaSDr. David Alan Gilbert         /* Search for the closest following block
1500154cc9eaSDr. David Alan Gilbert          * and find the gap.
1501154cc9eaSDr. David Alan Gilbert          */
150299e15582SPeter Xu         RAMBLOCK_FOREACH(next_block) {
1503154cc9eaSDr. David Alan Gilbert             if (next_block->offset >= candidate) {
150404b16653SAlex Williamson                 next = MIN(next, next_block->offset);
150504b16653SAlex Williamson             }
150604b16653SAlex Williamson         }
1507154cc9eaSDr. David Alan Gilbert 
1508154cc9eaSDr. David Alan Gilbert         /* If it fits, remember our place and the size of the
1509154cc9eaSDr. David Alan Gilbert          * gap, but keep going so that we might find a smaller
1510154cc9eaSDr. David Alan Gilbert          * gap to fill, thus avoiding fragmentation.
1511154cc9eaSDr. David Alan Gilbert          */
1512154cc9eaSDr. David Alan Gilbert         if (next - candidate >= size && next - candidate < mingap) {
1513154cc9eaSDr. David Alan Gilbert             offset = candidate;
1514154cc9eaSDr. David Alan Gilbert             mingap = next - candidate;
151504b16653SAlex Williamson         }
1516154cc9eaSDr. David Alan Gilbert 
1517154cc9eaSDr. David Alan Gilbert         trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
151804b16653SAlex Williamson     }
15193e837b2cSAlex Williamson 
15203e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
15213e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
15223e837b2cSAlex Williamson                 (uint64_t)size);
15233e837b2cSAlex Williamson         abort();
15243e837b2cSAlex Williamson     }
15253e837b2cSAlex Williamson 
1526154cc9eaSDr. David Alan Gilbert     trace_find_ram_offset(size, offset);
1527154cc9eaSDr. David Alan Gilbert 
152804b16653SAlex Williamson     return offset;
152904b16653SAlex Williamson }
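/*
 * Worked example (not part of the original file), assuming 4 KiB target pages
 * and 64-bit longs (so candidates round up to 256 KiB): with existing blocks
 * occupying [0, 4M) and [6M, 10M), a request for 1M considers two gaps,
 * [4M, 6M) of size 2M and everything above 10M, and returns offset 4M,
 * because the smallest gap that still fits wins.
 */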
153004b16653SAlex Williamson 
1531c136180cSDavid Hildenbrand static unsigned long last_ram_page(void)
153204b16653SAlex Williamson {
1533d17b5288SAlex Williamson     RAMBlock *block;
1534d17b5288SAlex Williamson     ram_addr_t last = 0;
1535d17b5288SAlex Williamson 
1536694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
153799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
153862be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
15390d53d9feSMike Day     }
1540b8c48993SJuan Quintela     return last >> TARGET_PAGE_BITS;
1541d17b5288SAlex Williamson }
1542d17b5288SAlex Williamson 
1543ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1544ddb97f1dSJason Baron {
1545ddb97f1dSJason Baron     int ret;
1546ddb97f1dSJason Baron 
1547ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
154847c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1549ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1550ddb97f1dSJason Baron         if (ret) {
1551ddb97f1dSJason Baron             perror("qemu_madvise");
1552ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
15530ff3243aSAkihiko Odaki                             "but dump-guest-core=off specified\n");
1554ddb97f1dSJason Baron         }
1555ddb97f1dSJason Baron     }
1556ddb97f1dSJason Baron }
1557ddb97f1dSJason Baron 
1558422148d3SDr. David Alan Gilbert const char *qemu_ram_get_idstr(RAMBlock *rb)
1559422148d3SDr. David Alan Gilbert {
1560422148d3SDr. David Alan Gilbert     return rb->idstr;
1561422148d3SDr. David Alan Gilbert }
1562422148d3SDr. David Alan Gilbert 
1563754cb9c0SYury Kotov void *qemu_ram_get_host_addr(RAMBlock *rb)
1564754cb9c0SYury Kotov {
1565754cb9c0SYury Kotov     return rb->host;
1566754cb9c0SYury Kotov }
1567754cb9c0SYury Kotov 
1568754cb9c0SYury Kotov ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1569754cb9c0SYury Kotov {
1570754cb9c0SYury Kotov     return rb->offset;
1571754cb9c0SYury Kotov }
1572754cb9c0SYury Kotov 
1573754cb9c0SYury Kotov ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1574754cb9c0SYury Kotov {
1575754cb9c0SYury Kotov     return rb->used_length;
1576754cb9c0SYury Kotov }
1577754cb9c0SYury Kotov 
1578082851a3SDavid Hildenbrand ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
1579082851a3SDavid Hildenbrand {
1580082851a3SDavid Hildenbrand     return rb->max_length;
1581082851a3SDavid Hildenbrand }
1582082851a3SDavid Hildenbrand 
1583463a4ac2SDr. David Alan Gilbert bool qemu_ram_is_shared(RAMBlock *rb)
1584463a4ac2SDr. David Alan Gilbert {
1585463a4ac2SDr. David Alan Gilbert     return rb->flags & RAM_SHARED;
1586463a4ac2SDr. David Alan Gilbert }
1587463a4ac2SDr. David Alan Gilbert 
15888dbe22c6SDavid Hildenbrand bool qemu_ram_is_noreserve(RAMBlock *rb)
15898dbe22c6SDavid Hildenbrand {
15908dbe22c6SDavid Hildenbrand     return rb->flags & RAM_NORESERVE;
15918dbe22c6SDavid Hildenbrand }
15928dbe22c6SDavid Hildenbrand 
15932ce16640SDr. David Alan Gilbert /* Note: Only set at the start of postcopy */
15942ce16640SDr. David Alan Gilbert bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
15952ce16640SDr. David Alan Gilbert {
15962ce16640SDr. David Alan Gilbert     return rb->flags & RAM_UF_ZEROPAGE;
15972ce16640SDr. David Alan Gilbert }
15982ce16640SDr. David Alan Gilbert 
15992ce16640SDr. David Alan Gilbert void qemu_ram_set_uf_zeroable(RAMBlock *rb)
16002ce16640SDr. David Alan Gilbert {
16012ce16640SDr. David Alan Gilbert     rb->flags |= RAM_UF_ZEROPAGE;
16022ce16640SDr. David Alan Gilbert }
16032ce16640SDr. David Alan Gilbert 
1604b895de50SCédric Le Goater bool qemu_ram_is_migratable(RAMBlock *rb)
1605b895de50SCédric Le Goater {
1606b895de50SCédric Le Goater     return rb->flags & RAM_MIGRATABLE;
1607b895de50SCédric Le Goater }
1608b895de50SCédric Le Goater 
1609b895de50SCédric Le Goater void qemu_ram_set_migratable(RAMBlock *rb)
1610b895de50SCédric Le Goater {
1611b895de50SCédric Le Goater     rb->flags |= RAM_MIGRATABLE;
1612b895de50SCédric Le Goater }
1613b895de50SCédric Le Goater 
1614b895de50SCédric Le Goater void qemu_ram_unset_migratable(RAMBlock *rb)
1615b895de50SCédric Le Goater {
1616b895de50SCédric Le Goater     rb->flags &= ~RAM_MIGRATABLE;
1617b895de50SCédric Le Goater }
1618b895de50SCédric Le Goater 
1619b0182e53SSteve Sistare bool qemu_ram_is_named_file(RAMBlock *rb)
1620b0182e53SSteve Sistare {
1621b0182e53SSteve Sistare     return rb->flags & RAM_NAMED_FILE;
1622b0182e53SSteve Sistare }
1623b0182e53SSteve Sistare 
16246d998f3cSStefan Hajnoczi int qemu_ram_get_fd(RAMBlock *rb)
16256d998f3cSStefan Hajnoczi {
16266d998f3cSStefan Hajnoczi     return rb->fd;
16276d998f3cSStefan Hajnoczi }
16286d998f3cSStefan Hajnoczi 
1629a4a411fbSStefan Hajnoczi /* Called with the BQL held.  */
1630fa53a0e5SGonglei void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
163120cfe881SHu Tao {
1632fa53a0e5SGonglei     RAMBlock *block;
163320cfe881SHu Tao 
1634c5705a77SAvi Kivity     assert(new_block);
1635c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
163684b89d78SCam Macdonell 
163709e5ab63SAnthony Liguori     if (dev) {
163809e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
163984b89d78SCam Macdonell         if (id) {
164084b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
16417267c094SAnthony Liguori             g_free(id);
164284b89d78SCam Macdonell         }
164384b89d78SCam Macdonell     }
164484b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
164584b89d78SCam Macdonell 
1646694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
164799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
1648fa53a0e5SGonglei         if (block != new_block &&
1649fa53a0e5SGonglei             !strcmp(block->idstr, new_block->idstr)) {
165084b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
165184b89d78SCam Macdonell                     new_block->idstr);
165284b89d78SCam Macdonell             abort();
165384b89d78SCam Macdonell         }
165484b89d78SCam Macdonell     }
1655c5705a77SAvi Kivity }
1656c5705a77SAvi Kivity 
1657a4a411fbSStefan Hajnoczi /* Called with the BQL held.  */
1658fa53a0e5SGonglei void qemu_ram_unset_idstr(RAMBlock *block)
165920cfe881SHu Tao {
1660ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1661ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1662ae3a7047SMike Day      * does not work anyway.
1663ae3a7047SMike Day      */
166420cfe881SHu Tao     if (block) {
166520cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
166620cfe881SHu Tao     }
166720cfe881SHu Tao }
166820cfe881SHu Tao 
1669863e9621SDr. David Alan Gilbert size_t qemu_ram_pagesize(RAMBlock *rb)
1670863e9621SDr. David Alan Gilbert {
1671863e9621SDr. David Alan Gilbert     return rb->page_size;
1672863e9621SDr. David Alan Gilbert }
1673863e9621SDr. David Alan Gilbert 
167467f11b5cSDr. David Alan Gilbert /* Returns the largest size of page in use */
167567f11b5cSDr. David Alan Gilbert size_t qemu_ram_pagesize_largest(void)
167667f11b5cSDr. David Alan Gilbert {
167767f11b5cSDr. David Alan Gilbert     RAMBlock *block;
167867f11b5cSDr. David Alan Gilbert     size_t largest = 0;
167967f11b5cSDr. David Alan Gilbert 
168099e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
168167f11b5cSDr. David Alan Gilbert         largest = MAX(largest, qemu_ram_pagesize(block));
168267f11b5cSDr. David Alan Gilbert     }
168367f11b5cSDr. David Alan Gilbert 
168467f11b5cSDr. David Alan Gilbert     return largest;
168567f11b5cSDr. David Alan Gilbert }
168667f11b5cSDr. David Alan Gilbert 
16878490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
16888490fc78SLuiz Capitulino {
168975cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
16908490fc78SLuiz Capitulino         /* disabled by the user */
16918490fc78SLuiz Capitulino         return 0;
16928490fc78SLuiz Capitulino     }
16938490fc78SLuiz Capitulino 
16948490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
16958490fc78SLuiz Capitulino }
16968490fc78SLuiz Capitulino 
1697c7c0e724SDavid Hildenbrand /*
1698c7c0e724SDavid Hildenbrand  * Resizing RAM while migrating can result in the migration being canceled.
1699c7c0e724SDavid Hildenbrand  * Care has to be taken if the guest might have already detected the memory.
170062be4e3aSMichael S. Tsirkin  *
170162be4e3aSMichael S. Tsirkin  * As the memory core doesn't know how the memory is accessed, it is up to
170262be4e3aSMichael S. Tsirkin  * the resize callback to update device state and/or add assertions to detect
170362be4e3aSMichael S. Tsirkin  * misuse, if necessary.
170462be4e3aSMichael S. Tsirkin  */
1705fa53a0e5SGonglei int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
170662be4e3aSMichael S. Tsirkin {
17078f44304cSDavid Hildenbrand     const ram_addr_t oldsize = block->used_length;
1708ce4adc0bSDavid Hildenbrand     const ram_addr_t unaligned_size = newsize;
1709ce4adc0bSDavid Hildenbrand 
171062be4e3aSMichael S. Tsirkin     assert(block);
171162be4e3aSMichael S. Tsirkin 
17129260bd40SRichard Henderson     newsize = TARGET_PAGE_ALIGN(newsize);
17139260bd40SRichard Henderson     newsize = REAL_HOST_PAGE_ALIGN(newsize);
1714129ddaf3SMichael S. Tsirkin 
171562be4e3aSMichael S. Tsirkin     if (block->used_length == newsize) {
1716ce4adc0bSDavid Hildenbrand         /*
1717ce4adc0bSDavid Hildenbrand          * We don't have to resize the ram block (which only knows aligned
1718ce4adc0bSDavid Hildenbrand          * sizes), however, we have to notify if the unaligned size changed.
1719ce4adc0bSDavid Hildenbrand          */
1720ce4adc0bSDavid Hildenbrand         if (unaligned_size != memory_region_size(block->mr)) {
1721ce4adc0bSDavid Hildenbrand             memory_region_set_size(block->mr, unaligned_size);
1722ce4adc0bSDavid Hildenbrand             if (block->resized) {
1723ce4adc0bSDavid Hildenbrand                 block->resized(block->idstr, unaligned_size, block->host);
1724ce4adc0bSDavid Hildenbrand             }
1725ce4adc0bSDavid Hildenbrand         }
172662be4e3aSMichael S. Tsirkin         return 0;
172762be4e3aSMichael S. Tsirkin     }
172862be4e3aSMichael S. Tsirkin 
172962be4e3aSMichael S. Tsirkin     if (!(block->flags & RAM_RESIZEABLE)) {
173062be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
1731a3a92908SPankaj Gupta                          "Size mismatch: %s: 0x" RAM_ADDR_FMT
1732a3a92908SPankaj Gupta                          " != 0x" RAM_ADDR_FMT, block->idstr,
173362be4e3aSMichael S. Tsirkin                          newsize, block->used_length);
173462be4e3aSMichael S. Tsirkin         return -EINVAL;
173562be4e3aSMichael S. Tsirkin     }
173662be4e3aSMichael S. Tsirkin 
173762be4e3aSMichael S. Tsirkin     if (block->max_length < newsize) {
173862be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
1739a3a92908SPankaj Gupta                          "Size too large: %s: 0x" RAM_ADDR_FMT
174062be4e3aSMichael S. Tsirkin                          " > 0x" RAM_ADDR_FMT, block->idstr,
174162be4e3aSMichael S. Tsirkin                          newsize, block->max_length);
174262be4e3aSMichael S. Tsirkin         return -EINVAL;
174362be4e3aSMichael S. Tsirkin     }
174462be4e3aSMichael S. Tsirkin 
17458f44304cSDavid Hildenbrand     /* Notify before modifying the ram block and touching the bitmaps. */
17468f44304cSDavid Hildenbrand     if (block->host) {
17478f44304cSDavid Hildenbrand         ram_block_notify_resize(block->host, oldsize, newsize);
17488f44304cSDavid Hildenbrand     }
17498f44304cSDavid Hildenbrand 
175062be4e3aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
175162be4e3aSMichael S. Tsirkin     block->used_length = newsize;
175258d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
175358d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
1754ce4adc0bSDavid Hildenbrand     memory_region_set_size(block->mr, unaligned_size);
175562be4e3aSMichael S. Tsirkin     if (block->resized) {
1756ce4adc0bSDavid Hildenbrand         block->resized(block->idstr, unaligned_size, block->host);
175762be4e3aSMichael S. Tsirkin     }
175862be4e3aSMichael S. Tsirkin     return 0;
175962be4e3aSMichael S. Tsirkin }
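/*
 * Illustrative sketch (not part of the original file): resizable blocks are
 * normally created and grown through the MemoryRegion layer, which ends up in
 * qemu_ram_resize() above.  The owner, name, sizes and callback below are
 * assumptions made up for the example.
 *
 *     memory_region_init_resizeable_ram(mr, owner, "fw-blob",
 *                                       64 * KiB, 2 * MiB,
 *                                       fw_blob_resized, &error_fatal);
 *     ...
 *     memory_region_ram_resize(mr, new_size, &error_fatal);
 */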
176062be4e3aSMichael S. Tsirkin 
176161c490e2SBeata Michalska /*
176261c490e2SBeata Michalska  * Trigger sync on the given ram block for range [start, start + length]
176361c490e2SBeata Michalska  * with the backing store if one is available.
176461c490e2SBeata Michalska  * Otherwise no-op.
176561c490e2SBeata Michalska  * @Note: this is supposed to be a synchronous op.
176661c490e2SBeata Michalska  */
1767ab7e41e6SPhilippe Mathieu-Daudé void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
176861c490e2SBeata Michalska {
176961c490e2SBeata Michalska     /* The requested range should fit within the block range */
177061c490e2SBeata Michalska     g_assert((start + length) <= block->used_length);
177161c490e2SBeata Michalska 
177261c490e2SBeata Michalska #ifdef CONFIG_LIBPMEM
177361c490e2SBeata Michalska     /* The lack of support for pmem should not block the sync */
177461c490e2SBeata Michalska     if (ramblock_is_pmem(block)) {
17755d4c9549SAnthony PERARD         void *addr = ramblock_ptr(block, start);
177661c490e2SBeata Michalska         pmem_persist(addr, length);
177761c490e2SBeata Michalska         return;
177861c490e2SBeata Michalska     }
177961c490e2SBeata Michalska #endif
178061c490e2SBeata Michalska     if (block->fd >= 0) {
178161c490e2SBeata Michalska         /**
178261c490e2SBeata Michalska          * In case there is no support for PMEM or the memory has not been
178361c490e2SBeata Michalska          * specified as persistent (or is not one), use msync.
178461c490e2SBeata Michalska          * Less optimal, but it still achieves the same goal.
178561c490e2SBeata Michalska          */
17865d4c9549SAnthony PERARD         void *addr = ramblock_ptr(block, start);
178761c490e2SBeata Michalska         if (qemu_msync(addr, length, block->fd)) {
178861c490e2SBeata Michalska             warn_report("%s: failed to sync memory range: start: "
178961c490e2SBeata Michalska                     RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
179061c490e2SBeata Michalska                     __func__, start, length);
179161c490e2SBeata Michalska         }
179261c490e2SBeata Michalska     }
179361c490e2SBeata Michalska }
179461c490e2SBeata Michalska 
17955b82b703SStefan Hajnoczi /* Called with ram_list.mutex held */
17965b82b703SStefan Hajnoczi static void dirty_memory_extend(ram_addr_t old_ram_size,
17975b82b703SStefan Hajnoczi                                 ram_addr_t new_ram_size)
17985b82b703SStefan Hajnoczi {
17995b82b703SStefan Hajnoczi     ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
18005b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
18015b82b703SStefan Hajnoczi     ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
18025b82b703SStefan Hajnoczi                                              DIRTY_MEMORY_BLOCK_SIZE);
18035b82b703SStefan Hajnoczi     int i;
18045b82b703SStefan Hajnoczi 
18055b82b703SStefan Hajnoczi     /* Only need to extend if block count increased */
18065b82b703SStefan Hajnoczi     if (new_num_blocks <= old_num_blocks) {
18075b82b703SStefan Hajnoczi         return;
18085b82b703SStefan Hajnoczi     }
18095b82b703SStefan Hajnoczi 
18105b82b703SStefan Hajnoczi     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
18115b82b703SStefan Hajnoczi         DirtyMemoryBlocks *old_blocks;
18125b82b703SStefan Hajnoczi         DirtyMemoryBlocks *new_blocks;
18135b82b703SStefan Hajnoczi         int j;
18145b82b703SStefan Hajnoczi 
1815d73415a3SStefan Hajnoczi         old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
18165b82b703SStefan Hajnoczi         new_blocks = g_malloc(sizeof(*new_blocks) +
18175b82b703SStefan Hajnoczi                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
18185b82b703SStefan Hajnoczi 
18195b82b703SStefan Hajnoczi         if (old_num_blocks) {
18205b82b703SStefan Hajnoczi             memcpy(new_blocks->blocks, old_blocks->blocks,
18215b82b703SStefan Hajnoczi                    old_num_blocks * sizeof(old_blocks->blocks[0]));
18225b82b703SStefan Hajnoczi         }
18235b82b703SStefan Hajnoczi 
18245b82b703SStefan Hajnoczi         for (j = old_num_blocks; j < new_num_blocks; j++) {
18255b82b703SStefan Hajnoczi             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
18265b82b703SStefan Hajnoczi         }
18275b82b703SStefan Hajnoczi 
1828d73415a3SStefan Hajnoczi         qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
18295b82b703SStefan Hajnoczi 
18305b82b703SStefan Hajnoczi         if (old_blocks) {
18315b82b703SStefan Hajnoczi             g_free_rcu(old_blocks, rcu);
18325b82b703SStefan Hajnoczi         }
18335b82b703SStefan Hajnoczi     }
18345b82b703SStefan Hajnoczi }
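/*
 * Illustrative note (not part of the original file): this publish side pairs
 * with the RCU readers elsewhere in this file, e.g. the dirty-bitmap snapshot
 * code above, which accesses the array only under the read lock:
 *
 *     WITH_RCU_READ_LOCK_GUARD() {
 *         DirtyMemoryBlocks *blocks =
 *             qatomic_rcu_read(&ram_list.dirty_memory[client]);
 *         ... use blocks->blocks[idx] ...
 *     }
 */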
18355b82b703SStefan Hajnoczi 
18367ce18ca0SDavid Hildenbrand static void ram_block_add(RAMBlock *new_block, Error **errp)
1837c5705a77SAvi Kivity {
18388dbe22c6SDavid Hildenbrand     const bool noreserve = qemu_ram_is_noreserve(new_block);
18397ce18ca0SDavid Hildenbrand     const bool shared = qemu_ram_is_shared(new_block);
1840e1c57ab8SPaolo Bonzini     RAMBlock *block;
18410d53d9feSMike Day     RAMBlock *last_block = NULL;
184215f7a80cSXiaoyao Li     bool free_on_error = false;
18432152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
184437aa7a0eSMarkus Armbruster     Error *err = NULL;
18452152f5caSJuan Quintela 
1846b8c48993SJuan Quintela     old_ram_size = last_ram_page();
1847c5705a77SAvi Kivity 
1848b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
18499b8424d5SMichael S. Tsirkin     new_block->offset = find_ram_offset(new_block->max_length);
1850e1c57ab8SPaolo Bonzini 
18510628c182SMarkus Armbruster     if (!new_block->host) {
1852e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
18539b8424d5SMichael S. Tsirkin             xen_ram_alloc(new_block->offset, new_block->max_length,
185437aa7a0eSMarkus Armbruster                           new_block->mr, &err);
185537aa7a0eSMarkus Armbruster             if (err) {
185637aa7a0eSMarkus Armbruster                 error_propagate(errp, err);
185737aa7a0eSMarkus Armbruster                 qemu_mutex_unlock_ramlist();
185839c350eeSPaolo Bonzini                 return;
185937aa7a0eSMarkus Armbruster             }
1860e1c57ab8SPaolo Bonzini         } else {
186125459eb7SDavid Hildenbrand             new_block->host = qemu_anon_ram_alloc(new_block->max_length,
186225459eb7SDavid Hildenbrand                                                   &new_block->mr->align,
18638dbe22c6SDavid Hildenbrand                                                   shared, noreserve);
186439228250SMarkus Armbruster             if (!new_block->host) {
1865ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1866ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1867ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1868ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
186939c350eeSPaolo Bonzini                 return;
187039228250SMarkus Armbruster             }
18719b8424d5SMichael S. Tsirkin             memory_try_enable_merging(new_block->host, new_block->max_length);
187215f7a80cSXiaoyao Li             free_on_error = true;
187315f7a80cSXiaoyao Li         }
187415f7a80cSXiaoyao Li     }
187515f7a80cSXiaoyao Li 
187615f7a80cSXiaoyao Li     if (new_block->flags & RAM_GUEST_MEMFD) {
1877644a5277SZhenzhong Duan         int ret;
1878644a5277SZhenzhong Duan 
187915f7a80cSXiaoyao Li         assert(kvm_enabled());
188015f7a80cSXiaoyao Li         assert(new_block->guest_memfd < 0);
188115f7a80cSXiaoyao Li 
1882644a5277SZhenzhong Duan         ret = ram_block_discard_require(true);
1883644a5277SZhenzhong Duan         if (ret < 0) {
1884644a5277SZhenzhong Duan             error_setg_errno(errp, -ret,
1885852f0048SPaolo Bonzini                              "cannot set up private guest memory: discard currently blocked");
1886852f0048SPaolo Bonzini             error_append_hint(errp, "Are you using assigned devices?\n");
1887852f0048SPaolo Bonzini             goto out_free;
1888852f0048SPaolo Bonzini         }
1889852f0048SPaolo Bonzini 
189015f7a80cSXiaoyao Li         new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
189115f7a80cSXiaoyao Li                                                         0, errp);
189215f7a80cSXiaoyao Li         if (new_block->guest_memfd < 0) {
189315f7a80cSXiaoyao Li             qemu_mutex_unlock_ramlist();
189415f7a80cSXiaoyao Li             goto out_free;
1895c902760fSMarcelo Tosatti         }
18966977dfe6SYoshiaki Tamura     }
189794a6b54fSpbrook 
1898dd631697SLi Zhijian     new_ram_size = MAX(old_ram_size,
1899dd631697SLi Zhijian               (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1900dd631697SLi Zhijian     if (new_ram_size > old_ram_size) {
19015b82b703SStefan Hajnoczi         dirty_memory_extend(old_ram_size, new_ram_size);
1902dd631697SLi Zhijian     }
19030d53d9feSMike Day     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
19040d53d9feSMike Day      * QLIST (which has an RCU-friendly variant) does not have insertion at
19050d53d9feSMike Day      * tail, so save the last element in last_block.
19060d53d9feSMike Day      */
190799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
19080d53d9feSMike Day         last_block = block;
19099b8424d5SMichael S. Tsirkin         if (block->max_length < new_block->max_length) {
1910abb26d63SPaolo Bonzini             break;
1911abb26d63SPaolo Bonzini         }
1912abb26d63SPaolo Bonzini     }
1913abb26d63SPaolo Bonzini     if (block) {
19140dc3f44aSMike Day         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
19150d53d9feSMike Day     } else if (last_block) {
19160dc3f44aSMike Day         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
19170d53d9feSMike Day     } else { /* list is empty */
19180dc3f44aSMike Day         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1919abb26d63SPaolo Bonzini     }
19200d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
192194a6b54fSpbrook 
19220dc3f44aSMike Day     /* Write list before version */
19230dc3f44aSMike Day     smp_wmb();
1924f798b07fSUmesh Deshpande     ram_list.version++;
1925b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1926f798b07fSUmesh Deshpande 
19279b8424d5SMichael S. Tsirkin     cpu_physical_memory_set_dirty_range(new_block->offset,
192858d2707eSPaolo Bonzini                                         new_block->used_length,
192958d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
193094a6b54fSpbrook 
1931a904c911SPaolo Bonzini     if (new_block->host) {
19329b8424d5SMichael S. Tsirkin         qemu_ram_setup_dump(new_block->host, new_block->max_length);
19339b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1934a028edeaSAlexander Bulekov         /*
1935a028edeaSAlexander Bulekov          * MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU.
1936a028edeaSAlexander Bulekov          * Configure it unless the machine is a qtest server, in which case
1937a028edeaSAlexander Bulekov          * KVM is not used and it may be forked (e.g. for fuzzing purposes).
1938a028edeaSAlexander Bulekov          */
1939a028edeaSAlexander Bulekov         if (!qtest_enabled()) {
1940a028edeaSAlexander Bulekov             qemu_madvise(new_block->host, new_block->max_length,
1941a028edeaSAlexander Bulekov                          QEMU_MADV_DONTFORK);
1942a028edeaSAlexander Bulekov         }
19438f44304cSDavid Hildenbrand         ram_block_notify_add(new_block->host, new_block->used_length,
19448f44304cSDavid Hildenbrand                              new_block->max_length);
1945a904c911SPaolo Bonzini     }
194615f7a80cSXiaoyao Li     return;
194715f7a80cSXiaoyao Li 
194815f7a80cSXiaoyao Li out_free:
194915f7a80cSXiaoyao Li     if (free_on_error) {
195015f7a80cSXiaoyao Li         qemu_anon_ram_free(new_block->host, new_block->max_length);
195115f7a80cSXiaoyao Li         new_block->host = NULL;
195215f7a80cSXiaoyao Li     }
195394a6b54fSpbrook }
1954e9a1ab19Sbellard 
1955d5dbde46SHikaru Nishida #ifdef CONFIG_POSIX
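/*
 * Allocate a new RAMBlock backed by the already-open file descriptor @fd,
 * mapped starting at file offset @offset.  Returns NULL and sets *errp on
 * failure; not supported under Xen or when KVM lacks MMU notifiers.
 */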
195638b3362dSMarc-André Lureau RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
195744a4ff31SJagannathan Raman                                  uint32_t ram_flags, int fd, off_t offset,
19585c52a219SDavid Hildenbrand                                  Error **errp)
1959e1c57ab8SPaolo Bonzini {
1960e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1961ef701d7bSHu Tao     Error *local_err = NULL;
1962ce317be9SJingqi Liu     int64_t file_size, file_align;
1963e1c57ab8SPaolo Bonzini 
1964a4de8552SJunyan He     /* Just support these ram flags for now. */
196556918a12SSean Christopherson     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
19665c52a219SDavid Hildenbrand                           RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
196715f7a80cSXiaoyao Li                           RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0);
1968a4de8552SJunyan He 
1969e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
19707f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
1971528f46afSFam Zheng         return NULL;
1972e1c57ab8SPaolo Bonzini     }
1973e1c57ab8SPaolo Bonzini 
1974e45e7ae2SMarc-André Lureau     if (kvm_enabled() && !kvm_has_sync_mmu()) {
1975e45e7ae2SMarc-André Lureau         error_setg(errp,
1976e45e7ae2SMarc-André Lureau                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1977e45e7ae2SMarc-André Lureau         return NULL;
1978e45e7ae2SMarc-André Lureau     }
1979e45e7ae2SMarc-André Lureau 
19809260bd40SRichard Henderson     size = TARGET_PAGE_ALIGN(size);
19819260bd40SRichard Henderson     size = REAL_HOST_PAGE_ALIGN(size);
19829260bd40SRichard Henderson 
19838d37b030SMarc-André Lureau     file_size = get_file_size(fd);
19844b870dc4SAlexander Graf     if (file_size > offset && file_size < (offset + size)) {
1985c001c3b3SIgor Mammedov         error_setg(errp, "backing store size 0x%" PRIx64
19868d37b030SMarc-André Lureau                    " does not match 'size' option 0x" RAM_ADDR_FMT,
1987c001c3b3SIgor Mammedov                    file_size, size);
19888d37b030SMarc-André Lureau         return NULL;
19898d37b030SMarc-André Lureau     }
19908d37b030SMarc-André Lureau 
1991ce317be9SJingqi Liu     file_align = get_file_align(fd);
19928f1bdb0eSPeter Maydell     if (file_align > 0 && file_align > mr->align) {
1993ce317be9SJingqi Liu         error_setg(errp, "backing store align 0x%" PRIx64
19945f509751SJingqi Liu                    " is larger than 'align' option 0x%" PRIx64,
1995ce317be9SJingqi Liu                    file_align, mr->align);
1996ce317be9SJingqi Liu         return NULL;
1997ce317be9SJingqi Liu     }
1998ce317be9SJingqi Liu 
1999e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
2000e1c57ab8SPaolo Bonzini     new_block->mr = mr;
20019b8424d5SMichael S. Tsirkin     new_block->used_length = size;
20029b8424d5SMichael S. Tsirkin     new_block->max_length = size;
2003cbfc0171SJunyan He     new_block->flags = ram_flags;
200415f7a80cSXiaoyao Li     new_block->guest_memfd = -1;
20055c52a219SDavid Hildenbrand     new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset,
20065c52a219SDavid Hildenbrand                                      errp);
20077f56e740SPaolo Bonzini     if (!new_block->host) {
20087f56e740SPaolo Bonzini         g_free(new_block);
2009528f46afSFam Zheng         return NULL;
20107f56e740SPaolo Bonzini     }
20117f56e740SPaolo Bonzini 
20127ce18ca0SDavid Hildenbrand     ram_block_add(new_block, &local_err);
2013ef701d7bSHu Tao     if (local_err) {
2014ef701d7bSHu Tao         g_free(new_block);
2015ef701d7bSHu Tao         error_propagate(errp, local_err);
2016528f46afSFam Zheng         return NULL;
2017ef701d7bSHu Tao     }
2018528f46afSFam Zheng     return new_block;
201938b3362dSMarc-André Lureau 
202038b3362dSMarc-André Lureau }
202138b3362dSMarc-André Lureau 
202238b3362dSMarc-André Lureau 
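/*
 * Like qemu_ram_alloc_from_fd(), but open (and possibly create) the backing
 * file at @mem_path first.  On failure the descriptor is closed again and a
 * file that was created here is unlinked.
 */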
202338b3362dSMarc-André Lureau RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2024cbfc0171SJunyan He                                    uint32_t ram_flags, const char *mem_path,
20255c52a219SDavid Hildenbrand                                    off_t offset, Error **errp)
202638b3362dSMarc-André Lureau {
202738b3362dSMarc-André Lureau     int fd;
202838b3362dSMarc-André Lureau     bool created;
202938b3362dSMarc-André Lureau     RAMBlock *block;
203038b3362dSMarc-André Lureau 
20315c52a219SDavid Hildenbrand     fd = file_ram_open(mem_path, memory_region_name(mr),
20324d6b23f7SDavid Hildenbrand                        !!(ram_flags & RAM_READONLY_FD), &created);
203338b3362dSMarc-André Lureau     if (fd < 0) {
20344d6b23f7SDavid Hildenbrand         error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM",
20354d6b23f7SDavid Hildenbrand                          mem_path);
20366da4b1c2SDavid Hildenbrand         if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) &&
20376da4b1c2SDavid Hildenbrand             fd == -EACCES) {
20386da4b1c2SDavid Hildenbrand             /*
20396da4b1c2SDavid Hildenbrand              * If we can open the file R/O (note: will never create a new file)
20406da4b1c2SDavid Hildenbrand              * and we are dealing with a private mapping, there are still ways
20416da4b1c2SDavid Hildenbrand              * to consume such files and get RAM instead of ROM.
20426da4b1c2SDavid Hildenbrand              */
20436da4b1c2SDavid Hildenbrand             fd = file_ram_open(mem_path, memory_region_name(mr), true,
20446da4b1c2SDavid Hildenbrand                                &created);
20456da4b1c2SDavid Hildenbrand             if (fd < 0) {
20466da4b1c2SDavid Hildenbrand                 return NULL;
20476da4b1c2SDavid Hildenbrand             }
20486da4b1c2SDavid Hildenbrand             assert(!created);
20496da4b1c2SDavid Hildenbrand             close(fd);
20506da4b1c2SDavid Hildenbrand             error_append_hint(errp, "Consider opening the backing store"
20516da4b1c2SDavid Hildenbrand                 " read-only but still creating writable RAM using"
20526da4b1c2SDavid Hildenbrand                 " '-object memory-backend-file,readonly=on,rom=off...'"
20536da4b1c2SDavid Hildenbrand                 " (see \"VM templating\" documentation)\n");
20546da4b1c2SDavid Hildenbrand         }
205538b3362dSMarc-André Lureau         return NULL;
205638b3362dSMarc-André Lureau     }
205738b3362dSMarc-André Lureau 
20585c52a219SDavid Hildenbrand     block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp);
205938b3362dSMarc-André Lureau     if (!block) {
206038b3362dSMarc-André Lureau         if (created) {
206138b3362dSMarc-André Lureau             unlink(mem_path);
206238b3362dSMarc-André Lureau         }
206338b3362dSMarc-André Lureau         close(fd);
206438b3362dSMarc-André Lureau         return NULL;
206538b3362dSMarc-André Lureau     }
206638b3362dSMarc-André Lureau 
206738b3362dSMarc-André Lureau     return block;
2068e1c57ab8SPaolo Bonzini }
20690b183fc8SPaolo Bonzini #endif
2070e1c57ab8SPaolo Bonzini 
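/*
 * Common allocation path behind qemu_ram_alloc(), qemu_ram_alloc_from_ptr()
 * and qemu_ram_alloc_resizeable(): round the sizes up to host and target
 * page alignment, fill in a new RAMBlock and register it via ram_block_add().
 */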
207162be4e3aSMichael S. Tsirkin static
2072528f46afSFam Zheng RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
207362be4e3aSMichael S. Tsirkin                                   void (*resized)(const char*,
207462be4e3aSMichael S. Tsirkin                                                   uint64_t length,
207562be4e3aSMichael S. Tsirkin                                                   void *host),
2076ebef62d0SDavid Hildenbrand                                   void *host, uint32_t ram_flags,
2077ef701d7bSHu Tao                                   MemoryRegion *mr, Error **errp)
2078e1c57ab8SPaolo Bonzini {
2079e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
2080ef701d7bSHu Tao     Error *local_err = NULL;
20819260bd40SRichard Henderson     int align;
2082e1c57ab8SPaolo Bonzini 
20838dbe22c6SDavid Hildenbrand     assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
208415f7a80cSXiaoyao Li                           RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2085ebef62d0SDavid Hildenbrand     assert(!host ^ (ram_flags & RAM_PREALLOC));
2086ebef62d0SDavid Hildenbrand 
20879260bd40SRichard Henderson     align = qemu_real_host_page_size();
20889260bd40SRichard Henderson     align = MAX(align, TARGET_PAGE_SIZE);
20899260bd40SRichard Henderson     size = ROUND_UP(size, align);
20909260bd40SRichard Henderson     max_size = ROUND_UP(max_size, align);
20919260bd40SRichard Henderson 
2092e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
2093e1c57ab8SPaolo Bonzini     new_block->mr = mr;
209462be4e3aSMichael S. Tsirkin     new_block->resized = resized;
20959b8424d5SMichael S. Tsirkin     new_block->used_length = size;
20969b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
209762be4e3aSMichael S. Tsirkin     assert(max_size >= size);
2098e1c57ab8SPaolo Bonzini     new_block->fd = -1;
209915f7a80cSXiaoyao Li     new_block->guest_memfd = -1;
21008e3b0cbbSMarc-André Lureau     new_block->page_size = qemu_real_host_page_size();
2101e1c57ab8SPaolo Bonzini     new_block->host = host;
2102ebef62d0SDavid Hildenbrand     new_block->flags = ram_flags;
21037ce18ca0SDavid Hildenbrand     ram_block_add(new_block, &local_err);
2104ef701d7bSHu Tao     if (local_err) {
2105ef701d7bSHu Tao         g_free(new_block);
2106ef701d7bSHu Tao         error_propagate(errp, local_err);
2107528f46afSFam Zheng         return NULL;
2108ef701d7bSHu Tao     }
2109528f46afSFam Zheng     return new_block;
2110e1c57ab8SPaolo Bonzini }
2111e1c57ab8SPaolo Bonzini 
2112528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
211362be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
211462be4e3aSMichael S. Tsirkin {
2115ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr,
2116ebef62d0SDavid Hildenbrand                                    errp);
211762be4e3aSMichael S. Tsirkin }
211862be4e3aSMichael S. Tsirkin 
2119ebef62d0SDavid Hildenbrand RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
212006329cceSMarcel Apfelbaum                          MemoryRegion *mr, Error **errp)
21216977dfe6SYoshiaki Tamura {
212215f7a80cSXiaoyao Li     assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2123ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
212462be4e3aSMichael S. Tsirkin }
212562be4e3aSMichael S. Tsirkin 
2126528f46afSFam Zheng RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
212762be4e3aSMichael S. Tsirkin                                      void (*resized)(const char*,
212862be4e3aSMichael S. Tsirkin                                                      uint64_t length,
212962be4e3aSMichael S. Tsirkin                                                      void *host),
213062be4e3aSMichael S. Tsirkin                                      MemoryRegion *mr, Error **errp)
213162be4e3aSMichael S. Tsirkin {
2132ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, maxsz, resized, NULL,
2133ebef62d0SDavid Hildenbrand                                    RAM_RESIZEABLE, mr, errp);
21346977dfe6SYoshiaki Tamura }
21356977dfe6SYoshiaki Tamura 
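/*
 * RCU reclaim callback used by qemu_ram_free(): release the host memory or
 * Xen mapping behind the block (unless it was preallocated by the caller),
 * close any guest_memfd, and free the RAMBlock itself.
 */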
213643771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block)
2137e9a1ab19Sbellard {
21387bd4f430SPaolo Bonzini     if (block->flags & RAM_PREALLOC) {
2139cd19cfa2SHuang Ying         ;
2140dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
2141dfeaf2abSMarkus Armbruster         xen_invalidate_map_cache_entry(block->host);
2142089f3f76SStefan Weil #ifndef _WIN32
21433435f395SMarkus Armbruster     } else if (block->fd >= 0) {
214453adb9d4SMurilo Opsfelder Araujo         qemu_ram_munmap(block->fd, block->host, block->max_length);
214504b16653SAlex Williamson         close(block->fd);
2146089f3f76SStefan Weil #endif
214704b16653SAlex Williamson     } else {
21489b8424d5SMichael S. Tsirkin         qemu_anon_ram_free(block->host, block->max_length);
214904b16653SAlex Williamson     }
215015f7a80cSXiaoyao Li 
215115f7a80cSXiaoyao Li     if (block->guest_memfd >= 0) {
215215f7a80cSXiaoyao Li         close(block->guest_memfd);
2153852f0048SPaolo Bonzini         ram_block_discard_require(false);
215415f7a80cSXiaoyao Li     }
215515f7a80cSXiaoyao Li 
21567267c094SAnthony Liguori     g_free(block);
215743771539SPaolo Bonzini }
215843771539SPaolo Bonzini 
2159f1060c55SFam Zheng void qemu_ram_free(RAMBlock *block)
216043771539SPaolo Bonzini {
216185bc2a15SMarc-André Lureau     if (!block) {
216285bc2a15SMarc-André Lureau         return;
216385bc2a15SMarc-André Lureau     }
216485bc2a15SMarc-André Lureau 
21650987d735SPaolo Bonzini     if (block->host) {
21668f44304cSDavid Hildenbrand         ram_block_notify_remove(block->host, block->used_length,
21678f44304cSDavid Hildenbrand                                 block->max_length);
21680987d735SPaolo Bonzini     }
21690987d735SPaolo Bonzini 
217043771539SPaolo Bonzini     qemu_mutex_lock_ramlist();
21710dc3f44aSMike Day     QLIST_REMOVE_RCU(block, next);
217243771539SPaolo Bonzini     ram_list.mru_block = NULL;
21730dc3f44aSMike Day     /* Write list before version */
21740dc3f44aSMike Day     smp_wmb();
217543771539SPaolo Bonzini     ram_list.version++;
217643771539SPaolo Bonzini     call_rcu(block, reclaim_ramblock, rcu);
2177b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
2178e9a1ab19Sbellard }
2179e9a1ab19Sbellard 
2180cd19cfa2SHuang Ying #ifndef _WIN32
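/*
 * Re-establish the mapping of a range of guest RAM at its existing host
 * virtual address, preserving the block's sharing, reservation and
 * read-only flags.  Exits if the area cannot be remapped.
 */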
2181cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2182cd19cfa2SHuang Ying {
2183cd19cfa2SHuang Ying     RAMBlock *block;
2184cd19cfa2SHuang Ying     ram_addr_t offset;
2185cd19cfa2SHuang Ying     int flags;
2186cd19cfa2SHuang Ying     void *area, *vaddr;
21879e6b9f37SDavid Hildenbrand     int prot;
2188cd19cfa2SHuang Ying 
218999e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2190cd19cfa2SHuang Ying         offset = addr - block->offset;
21919b8424d5SMichael S. Tsirkin         if (offset < block->max_length) {
21921240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
21937bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
2194cd19cfa2SHuang Ying                 ;
2195dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
2196dfeaf2abSMarkus Armbruster                 abort();
2197cd19cfa2SHuang Ying             } else {
2198cd19cfa2SHuang Ying                 flags = MAP_FIXED;
2199dbb92eeaSDavid Hildenbrand                 flags |= block->flags & RAM_SHARED ?
2200dbb92eeaSDavid Hildenbrand                          MAP_SHARED : MAP_PRIVATE;
2201d94e0bc9SDavid Hildenbrand                 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
22029e6b9f37SDavid Hildenbrand                 prot = PROT_READ;
22039e6b9f37SDavid Hildenbrand                 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
22043435f395SMarkus Armbruster                 if (block->fd >= 0) {
22059e6b9f37SDavid Hildenbrand                     area = mmap(vaddr, length, prot, flags, block->fd,
22069e6b9f37SDavid Hildenbrand                                 offset + block->fd_offset);
2207cd19cfa2SHuang Ying                 } else {
2208dbb92eeaSDavid Hildenbrand                     flags |= MAP_ANONYMOUS;
22099e6b9f37SDavid Hildenbrand                     area = mmap(vaddr, length, prot, flags, -1, 0);
2210cd19cfa2SHuang Ying                 }
2211cd19cfa2SHuang Ying                 if (area != vaddr) {
2212493d89bfSAlistair Francis                     error_report("Could not remap addr: "
2213493d89bfSAlistair Francis                                  RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
2214cd19cfa2SHuang Ying                                  length, addr);
2215cd19cfa2SHuang Ying                     exit(1);
2216cd19cfa2SHuang Ying                 }
22178490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
2218ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
2219cd19cfa2SHuang Ying             }
2220cd19cfa2SHuang Ying         }
2221cd19cfa2SHuang Ying     }
2222cd19cfa2SHuang Ying }
2223cd19cfa2SHuang Ying #endif /* !_WIN32 */
2224cd19cfa2SHuang Ying 
2225a99dd337SJuergen Gross /*
2226a99dd337SJuergen Gross  * Return a host pointer to guest's ram.
22275a5585f4SEdgar E. Iglesias  * For Xen, foreign mappings get created if they don't already exist.
22280dc3f44aSMike Day  *
22295a5585f4SEdgar E. Iglesias  * @block: block for the RAM to lookup (optional and may be NULL).
22305a5585f4SEdgar E. Iglesias  * @addr: address within the memory region.
22315a5585f4SEdgar E. Iglesias  * @size: pointer to requested size (optional and may be NULL).
22325a5585f4SEdgar E. Iglesias  *        *size may be reduced on return if the requested length
22335a5585f4SEdgar E. Iglesias  *        extends beyond the end of the block.
22345a5585f4SEdgar E. Iglesias  * @lock: whether to lock the mapping in xen-mapcache until invalidated.
22355a5585f4SEdgar E. Iglesias  * @is_write: hint whether to map RW or RO in the xen-mapcache.
22365a5585f4SEdgar E. Iglesias  *            (optional and may always be set to true).
22370dc3f44aSMike Day  *
2238e81bcda5SPaolo Bonzini  * Called within RCU critical section.
2239ae3a7047SMike Day  */
2240aab4631aSManos Pitsidianakis static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
22415a5585f4SEdgar E. Iglesias                                  hwaddr *size, bool lock,
22425a5585f4SEdgar E. Iglesias                                  bool is_write)
224338bee5dcSStefano Stabellini {
2244a99dd337SJuergen Gross     hwaddr len = 0;
2245a99dd337SJuergen Gross 
2246a99dd337SJuergen Gross     if (size && *size == 0) {
22478ab934f9SStefano Stabellini         return NULL;
22488ab934f9SStefano Stabellini     }
2249e81bcda5SPaolo Bonzini 
22503655cb9cSGonglei     if (block == NULL) {
2251e81bcda5SPaolo Bonzini         block = qemu_get_ram_block(addr);
22520878d0e1SPaolo Bonzini         addr -= block->offset;
22533655cb9cSGonglei     }
2254a99dd337SJuergen Gross     if (size) {
22550878d0e1SPaolo Bonzini         *size = MIN(*size, block->max_length - addr);
2256a99dd337SJuergen Gross         len = *size;
2257a99dd337SJuergen Gross     }
2258e81bcda5SPaolo Bonzini 
2259e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
2260e81bcda5SPaolo Bonzini         /* We need to check if the requested address is in the RAM
2261e81bcda5SPaolo Bonzini          * because we don't want to map the entire memory in QEMU.
2262e81bcda5SPaolo Bonzini          * In that case just map the requested area.
2263e81bcda5SPaolo Bonzini          */
2264a5bdc451SEdgar E. Iglesias         if (xen_mr_is_memory(block->mr)) {
22655d1c2602SEdgar E. Iglesias             return xen_map_cache(block->mr, block->offset + addr,
226649a72029SEdgar E. Iglesias                                  len, block->offset,
226749a72029SEdgar E. Iglesias                                  lock, lock, is_write);
226838bee5dcSStefano Stabellini         }
226938bee5dcSStefano Stabellini 
22705a5585f4SEdgar E. Iglesias         block->host = xen_map_cache(block->mr, block->offset,
227149a72029SEdgar E. Iglesias                                     block->max_length,
227249a72029SEdgar E. Iglesias                                     block->offset,
227349a72029SEdgar E. Iglesias                                     1, lock, is_write);
227438bee5dcSStefano Stabellini     }
2275e81bcda5SPaolo Bonzini 
22760878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
227738bee5dcSStefano Stabellini }
227838bee5dcSStefano Stabellini 
2279a99dd337SJuergen Gross /*
2280a99dd337SJuergen Gross  * Return a host pointer to ram allocated with qemu_ram_alloc.
2281a99dd337SJuergen Gross  * This should not be used for general purpose DMA.  Use address_space_map
2282a99dd337SJuergen Gross  * or address_space_rw instead. For local memory (e.g. video ram) that the
2283a99dd337SJuergen Gross  * device owns, use memory_region_get_ram_ptr.
2284a99dd337SJuergen Gross  *
2285a99dd337SJuergen Gross  * Called within RCU critical section.
2286a99dd337SJuergen Gross  */
2287a99dd337SJuergen Gross void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2288a99dd337SJuergen Gross {
22895a5585f4SEdgar E. Iglesias     return qemu_ram_ptr_length(ram_block, addr, NULL, false, true);
2290a99dd337SJuergen Gross }
2291a99dd337SJuergen Gross 
2292f90bb71bSDr. David Alan Gilbert /* Return the offset of a host pointer within a RAMBlock */
2293f90bb71bSDr. David Alan Gilbert ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2294f90bb71bSDr. David Alan Gilbert {
2295f90bb71bSDr. David Alan Gilbert     ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2296f90bb71bSDr. David Alan Gilbert     assert((uintptr_t)host >= (uintptr_t)rb->host);
2297f90bb71bSDr. David Alan Gilbert     assert(res < rb->max_length);
2298f90bb71bSDr. David Alan Gilbert 
2299f90bb71bSDr. David Alan Gilbert     return res;
2300f90bb71bSDr. David Alan Gilbert }
2301f90bb71bSDr. David Alan Gilbert 
2302422148d3SDr. David Alan Gilbert RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2303422148d3SDr. David Alan Gilbert                                    ram_addr_t *offset)
23045579c7f3Spbrook {
230594a6b54fSpbrook     RAMBlock *block;
230694a6b54fSpbrook     uint8_t *host = ptr;
230794a6b54fSpbrook 
2308868bb33fSJan Kiszka     if (xen_enabled()) {
2309f615f396SPaolo Bonzini         ram_addr_t ram_addr;
2310694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
2311f615f396SPaolo Bonzini         ram_addr = xen_ram_addr_from_mapcache(ptr);
2312596ccccdSEdgar E. Iglesias         if (ram_addr == RAM_ADDR_INVALID) {
2313596ccccdSEdgar E. Iglesias             return NULL;
2314596ccccdSEdgar E. Iglesias         }
2315596ccccdSEdgar E. Iglesias 
2316f615f396SPaolo Bonzini         block = qemu_get_ram_block(ram_addr);
2317422148d3SDr. David Alan Gilbert         if (block) {
2318d6b6aec4SAnthony PERARD             *offset = ram_addr - block->offset;
2319422148d3SDr. David Alan Gilbert         }
2320422148d3SDr. David Alan Gilbert         return block;
2321712c2b41SStefano Stabellini     }
2322712c2b41SStefano Stabellini 
2323694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
2324d73415a3SStefan Hajnoczi     block = qatomic_rcu_read(&ram_list.mru_block);
23259b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
232623887b79SPaolo Bonzini         goto found;
232723887b79SPaolo Bonzini     }
232823887b79SPaolo Bonzini 
232999e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2330432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
2331432d268cSJun Nakajima         if (block->host == NULL) {
2332432d268cSJun Nakajima             continue;
2333432d268cSJun Nakajima         }
23349b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
233523887b79SPaolo Bonzini             goto found;
233694a6b54fSpbrook         }
2337f471a17eSAlex Williamson     }
2338432d268cSJun Nakajima 
23391b5ec234SPaolo Bonzini     return NULL;
234023887b79SPaolo Bonzini 
234123887b79SPaolo Bonzini found:
2342422148d3SDr. David Alan Gilbert     *offset = (host - block->host);
2343422148d3SDr. David Alan Gilbert     if (round_offset) {
2344422148d3SDr. David Alan Gilbert         *offset &= TARGET_PAGE_MASK;
2345422148d3SDr. David Alan Gilbert     }
2346422148d3SDr. David Alan Gilbert     return block;
2347422148d3SDr. David Alan Gilbert }
2348422148d3SDr. David Alan Gilbert 
2349e3dd7493SDr. David Alan Gilbert /*
2350e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
2351e3dd7493SDr. David Alan Gilbert  *
2352e3dd7493SDr. David Alan Gilbert  * name: The name of RAMBlock to find
2353e3dd7493SDr. David Alan Gilbert  *
2354e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
2355e3dd7493SDr. David Alan Gilbert  */
2356e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
2357e3dd7493SDr. David Alan Gilbert {
2358e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
2359e3dd7493SDr. David Alan Gilbert 
236099e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2361e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
2362e3dd7493SDr. David Alan Gilbert             return block;
2363e3dd7493SDr. David Alan Gilbert         }
2364e3dd7493SDr. David Alan Gilbert     }
2365e3dd7493SDr. David Alan Gilbert 
2366e3dd7493SDr. David Alan Gilbert     return NULL;
2367e3dd7493SDr. David Alan Gilbert }
2368e3dd7493SDr. David Alan Gilbert 
23698d7f2e76SPhilippe Mathieu-Daudé /*
23708d7f2e76SPhilippe Mathieu-Daudé  * Some of the system routines need to translate from a host pointer
23718d7f2e76SPhilippe Mathieu-Daudé  * (typically a TLB entry) back to a ram offset.
23728d7f2e76SPhilippe Mathieu-Daudé  */
237307bdaa41SPaolo Bonzini ram_addr_t qemu_ram_addr_from_host(void *ptr)
2374422148d3SDr. David Alan Gilbert {
2375422148d3SDr. David Alan Gilbert     RAMBlock *block;
2376f615f396SPaolo Bonzini     ram_addr_t offset;
2377422148d3SDr. David Alan Gilbert 
2378f615f396SPaolo Bonzini     block = qemu_ram_block_from_host(ptr, false, &offset);
2379422148d3SDr. David Alan Gilbert     if (!block) {
238007bdaa41SPaolo Bonzini         return RAM_ADDR_INVALID;
2381422148d3SDr. David Alan Gilbert     }
2382422148d3SDr. David Alan Gilbert 
238307bdaa41SPaolo Bonzini     return block->offset + offset;
2384e890261fSMarcelo Tosatti }
2385f471a17eSAlex Williamson 
238697e03465SRichard Henderson ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
238797e03465SRichard Henderson {
238897e03465SRichard Henderson     ram_addr_t ram_addr;
238997e03465SRichard Henderson 
239097e03465SRichard Henderson     ram_addr = qemu_ram_addr_from_host(ptr);
239197e03465SRichard Henderson     if (ram_addr == RAM_ADDR_INVALID) {
239297e03465SRichard Henderson         error_report("Bad ram pointer %p", ptr);
239397e03465SRichard Henderson         abort();
239497e03465SRichard Henderson     }
239597e03465SRichard Henderson     return ram_addr;
239697e03465SRichard Henderson }
239797e03465SRichard Henderson 
2398b2a44fcaSPaolo Bonzini static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2399a152be43SPhilippe Mathieu-Daudé                                  MemTxAttrs attrs, void *buf, hwaddr len);
240016620684SAlexey Kardashevskiy static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2401a152be43SPhilippe Mathieu-Daudé                                   const void *buf, hwaddr len);
24020c249ff7SLi Zhijian static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
2403eace72b7SPeter Maydell                                   bool is_write, MemTxAttrs attrs);
240416620684SAlexey Kardashevskiy 
2405f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2406f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2407db7b5426Sblueswir1 {
2408acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2409ff6cff75SPaolo Bonzini     uint8_t buf[8];
24105c9eb028SPeter Maydell     MemTxResult res;
2411791af8c8SPaolo Bonzini 
2412db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2413883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__,
2414acc9d80bSJan Kiszka            subpage, len, addr);
2415db7b5426Sblueswir1 #endif
241616620684SAlexey Kardashevskiy     res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
24175c9eb028SPeter Maydell     if (res) {
24185c9eb028SPeter Maydell         return res;
2419f25a49e0SPeter Maydell     }
24206d3ede54SPeter Maydell     *data = ldn_p(buf, len);
2421f25a49e0SPeter Maydell     return MEMTX_OK;
2422db7b5426Sblueswir1 }
2423db7b5426Sblueswir1 
2424f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2425f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2426db7b5426Sblueswir1 {
2427acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2428ff6cff75SPaolo Bonzini     uint8_t buf[8];
2429acc9d80bSJan Kiszka 
2430db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2431883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx
2432acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2433acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2434db7b5426Sblueswir1 #endif
24356d3ede54SPeter Maydell     stn_p(buf, len, value);
243616620684SAlexey Kardashevskiy     return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2437db7b5426Sblueswir1 }
2438db7b5426Sblueswir1 
2439c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
24408372d383SPeter Maydell                             unsigned len, bool is_write,
24418372d383SPeter Maydell                             MemTxAttrs attrs)
2442c353e4ccSPaolo Bonzini {
2443acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2444c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2445883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n",
2446acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2447c353e4ccSPaolo Bonzini #endif
2448c353e4ccSPaolo Bonzini 
244916620684SAlexey Kardashevskiy     return flatview_access_valid(subpage->fv, addr + subpage->base,
2450eace72b7SPeter Maydell                                  len, is_write, attrs);
2451c353e4ccSPaolo Bonzini }
2452c353e4ccSPaolo Bonzini 
245370c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2454f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2455f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2456ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2457ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2458ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2459ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2460c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
246170c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2462db7b5426Sblueswir1 };
2463db7b5426Sblueswir1 
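/*
 * Point the subpage entries covering byte offsets [start, end] within the
 * page at the given section index.
 */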
2464c227f099SAnthony Liguori static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
24655312bd8bSAvi Kivity                             uint16_t section)
2466db7b5426Sblueswir1 {
2467db7b5426Sblueswir1     int idx, eidx;
2468db7b5426Sblueswir1 
2469db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2470db7b5426Sblueswir1         return -1;
2471db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2472db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2473db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2474016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2475016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2476db7b5426Sblueswir1 #endif
2477db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
24785312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2479db7b5426Sblueswir1     }
2480db7b5426Sblueswir1 
2481db7b5426Sblueswir1     return 0;
2482db7b5426Sblueswir1 }
2483db7b5426Sblueswir1 
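/*
 * Allocate and initialise a subpage_t for the page starting at @base.  Its
 * MemoryRegion forwards reads and writes back through the FlatView, so that
 * different parts of one target page can be backed by different sections.
 */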
248416620684SAlexey Kardashevskiy static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2485db7b5426Sblueswir1 {
2486c227f099SAnthony Liguori     subpage_t *mmio;
2487db7b5426Sblueswir1 
2488b797ab1aSWei Yang     /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
24892615fabdSVijaya Kumar K     mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
249016620684SAlexey Kardashevskiy     mmio->fv = fv;
2491db7b5426Sblueswir1     mmio->base = base;
24922c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2493b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2494b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2495db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2496883f2c59SPhilippe Mathieu-Daudé     printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__,
2497016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2498db7b5426Sblueswir1 #endif
2499db7b5426Sblueswir1 
2500db7b5426Sblueswir1     return mmio;
2501db7b5426Sblueswir1 }
2502db7b5426Sblueswir1 
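/*
 * Register a MemoryRegionSection spanning the whole address range of @mr
 * in @map and return its section index.
 */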
250316620684SAlexey Kardashevskiy static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
25045312bd8bSAvi Kivity {
250516620684SAlexey Kardashevskiy     assert(fv);
25065312bd8bSAvi Kivity     MemoryRegionSection section = {
250716620684SAlexey Kardashevskiy         .fv = fv,
25085312bd8bSAvi Kivity         .mr = mr,
25095312bd8bSAvi Kivity         .offset_within_address_space = 0,
25105312bd8bSAvi Kivity         .offset_within_region = 0,
2511052e87b0SPaolo Bonzini         .size = int128_2_64(),
25125312bd8bSAvi Kivity     };
25135312bd8bSAvi Kivity 
251453cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
25155312bd8bSAvi Kivity }
25165312bd8bSAvi Kivity 
25172d54f194SPeter Maydell MemoryRegionSection *iotlb_to_section(CPUState *cpu,
25182d54f194SPeter Maydell                                       hwaddr index, MemTxAttrs attrs)
2519aa102231SAvi Kivity {
2520a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2521a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
25220d58c660SRichard Henderson     AddressSpaceDispatch *d = cpuas->memory_dispatch;
252386e4f93dSRichard Henderson     int section_index = index & ~TARGET_PAGE_MASK;
252486e4f93dSRichard Henderson     MemoryRegionSection *ret;
25259d82b5a7SPaolo Bonzini 
252686e4f93dSRichard Henderson     assert(section_index < d->map.sections_nb);
252786e4f93dSRichard Henderson     ret = d->map.sections + section_index;
252886e4f93dSRichard Henderson     assert(ret->mr);
252986e4f93dSRichard Henderson     assert(ret->mr->ops);
253086e4f93dSRichard Henderson 
253186e4f93dSRichard Henderson     return ret;
2532aa102231SAvi Kivity }
2533aa102231SAvi Kivity 
2534e9179ce1SAvi Kivity static void io_mem_init(void)
2535e9179ce1SAvi Kivity {
25362c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
25371f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2538e9179ce1SAvi Kivity }
2539e9179ce1SAvi Kivity 
25408629d3fcSAlexey Kardashevskiy AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2541ac1970fbSAvi Kivity {
254253cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
254353cb28cbSMarcel Apfelbaum     uint16_t n;
254453cb28cbSMarcel Apfelbaum 
254516620684SAlexey Kardashevskiy     n = dummy_section(&d->map, fv, &io_mem_unassigned);
254653cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
254700752703SPaolo Bonzini 
25489736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
254966a6df1dSAlexey Kardashevskiy 
255066a6df1dSAlexey Kardashevskiy     return d;
255100752703SPaolo Bonzini }
255200752703SPaolo Bonzini 
255366a6df1dSAlexey Kardashevskiy void address_space_dispatch_free(AddressSpaceDispatch *d)
255479e2b9aeSPaolo Bonzini {
255579e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
255679e2b9aeSPaolo Bonzini     g_free(d);
255779e2b9aeSPaolo Bonzini }
255879e2b9aeSPaolo Bonzini 
25599458a9a1SPaolo Bonzini static void do_nothing(CPUState *cpu, run_on_cpu_data d)
25609458a9a1SPaolo Bonzini {
25619458a9a1SPaolo Bonzini }
25629458a9a1SPaolo Bonzini 
25639458a9a1SPaolo Bonzini static void tcg_log_global_after_sync(MemoryListener *listener)
25649458a9a1SPaolo Bonzini {
25659458a9a1SPaolo Bonzini     CPUAddressSpace *cpuas;
25669458a9a1SPaolo Bonzini 
25679458a9a1SPaolo Bonzini     /* Wait for the CPU to end the current TB.  This avoids the following
25689458a9a1SPaolo Bonzini      * incorrect race:
25699458a9a1SPaolo Bonzini      *
25709458a9a1SPaolo Bonzini      *      vCPU                         migration
25719458a9a1SPaolo Bonzini      *      ----------------------       -------------------------
25729458a9a1SPaolo Bonzini      *      TLB check -> slow path
25739458a9a1SPaolo Bonzini      *        notdirty_mem_write
25749458a9a1SPaolo Bonzini      *          write to RAM
25759458a9a1SPaolo Bonzini      *          mark dirty
25769458a9a1SPaolo Bonzini      *                                   clear dirty flag
25779458a9a1SPaolo Bonzini      *      TLB check -> fast path
25789458a9a1SPaolo Bonzini      *                                   read memory
25799458a9a1SPaolo Bonzini      *        write to RAM
25809458a9a1SPaolo Bonzini      *
25819458a9a1SPaolo Bonzini      * by pushing the migration thread's memory read after the vCPU thread has
25829458a9a1SPaolo Bonzini      * written the memory.
25839458a9a1SPaolo Bonzini      */
258486cf9e15SPavel Dovgalyuk     if (replay_mode == REPLAY_MODE_NONE) {
258586cf9e15SPavel Dovgalyuk         /*
258686cf9e15SPavel Dovgalyuk          * VGA can make calls to this function while updating the screen.
258786cf9e15SPavel Dovgalyuk          * In record/replay mode this causes a deadlock, because
258886cf9e15SPavel Dovgalyuk          * run_on_cpu waits for the rr mutex. Therefore no races are possible
258986cf9e15SPavel Dovgalyuk          * in this case and there is no need to call run_on_cpu when
2590f18d403fSGreg Kurz          * record/replay is enabled.
259186cf9e15SPavel Dovgalyuk          */
25929458a9a1SPaolo Bonzini         cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
25939458a9a1SPaolo Bonzini         run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
25949458a9a1SPaolo Bonzini     }
259586cf9e15SPavel Dovgalyuk }
25969458a9a1SPaolo Bonzini 
25970d58c660SRichard Henderson static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
25980d58c660SRichard Henderson {
25990d58c660SRichard Henderson     CPUAddressSpace *cpuas = data.host_ptr;
26000d58c660SRichard Henderson 
26010d58c660SRichard Henderson     cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
26020d58c660SRichard Henderson     tlb_flush(cpu);
26030d58c660SRichard Henderson }
26040d58c660SRichard Henderson 
26051d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
260650c1e149SAvi Kivity {
260732857f4dSPeter Maydell     CPUAddressSpace *cpuas;
26080d58c660SRichard Henderson     CPUState *cpu;
2609117712c3SAvi Kivity 
2610f28d0dfdSEmilio G. Cota     assert(tcg_enabled());
2611117712c3SAvi Kivity     /* Since each CPU stores ram addresses in its TLB cache, we must
2612117712c3SAvi Kivity        reset the modified entries.  */
261332857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
26140d58c660SRichard Henderson     cpu = cpuas->cpu;
26150d58c660SRichard Henderson 
26160d58c660SRichard Henderson     /*
26170d58c660SRichard Henderson      * Defer changes to as->memory_dispatch until the cpu is quiescent.
26180d58c660SRichard Henderson      * Otherwise we race between (1) other cpu threads and (2) ongoing
26190d58c660SRichard Henderson      * i/o for the current cpu thread, with data cached by mmu_lookup().
26200d58c660SRichard Henderson      *
26210d58c660SRichard Henderson      * In addition, queueing the work function will kick the cpu back to
26220d58c660SRichard Henderson      * the main loop, which will end the RCU critical section and reclaim
26230d58c660SRichard Henderson      * the memory data structures.
26240d58c660SRichard Henderson      *
26250d58c660SRichard Henderson      * That said, the listener is also called during realize, before
26260d58c660SRichard Henderson      * all of the tcg machinery for run-on is initialized: thus halt_cond.
262732857f4dSPeter Maydell      */
26280d58c660SRichard Henderson     if (cpu->halt_cond) {
26290d58c660SRichard Henderson         async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
26300d58c660SRichard Henderson     } else {
26310d58c660SRichard Henderson         tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
26320d58c660SRichard Henderson     }
263350c1e149SAvi Kivity }
263450c1e149SAvi Kivity 
263562152b8aSAvi Kivity static void memory_map_init(void)
263662152b8aSAvi Kivity {
26377267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
263803f49957SPaolo Bonzini 
263957271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
26407dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2641309cb471SAvi Kivity 
26427267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
26433bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
26443bb28b72SJan Kiszka                           65536);
26457dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
26462641689aSliguang }
264762152b8aSAvi Kivity 
264862152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
264962152b8aSAvi Kivity {
265062152b8aSAvi Kivity     return system_memory;
265162152b8aSAvi Kivity }
265262152b8aSAvi Kivity 
2653309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2654309cb471SAvi Kivity {
2655309cb471SAvi Kivity     return system_io;
2656309cb471SAvi Kivity }
2657309cb471SAvi Kivity 
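/*
 * After guest memory has been modified, invalidate any translated code for
 * the written range and mark it dirty for the remaining dirty memory clients.
 */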
2658845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2659a8170e5eSAvi Kivity                                      hwaddr length)
266051d7a9ebSAnthony PERARD {
2661845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
26620878d0e1SPaolo Bonzini     addr += memory_region_get_ram_addr(mr);
26630878d0e1SPaolo Bonzini 
2664e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2665e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2666e87f7778SPaolo Bonzini      * xen_modified_memory.
2667e87f7778SPaolo Bonzini      */
2668e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2669e87f7778SPaolo Bonzini         dirty_log_mask =
2670e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2671e87f7778SPaolo Bonzini     }
2672845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
26735aa1ef71SPaolo Bonzini         assert(tcg_enabled());
2674e506ad6aSRichard Henderson         tb_invalidate_phys_range(addr, addr + length - 1);
2675845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2676845b6214SPaolo Bonzini     }
267758d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
267849dfcec4SPaolo Bonzini }
267951d7a9ebSAnthony PERARD 
2680047be4edSStefan Hajnoczi void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
2681047be4edSStefan Hajnoczi {
2682047be4edSStefan Hajnoczi     /*
2683047be4edSStefan Hajnoczi      * In principle this function would work on other memory region types too,
2684047be4edSStefan Hajnoczi      * but the ROM device use case is the only one where this operation is
2685047be4edSStefan Hajnoczi      * necessary.  Other memory regions should use the
2686047be4edSStefan Hajnoczi      * address_space_read/write() APIs.
2687047be4edSStefan Hajnoczi      */
2688047be4edSStefan Hajnoczi     assert(memory_region_is_romd(mr));
2689047be4edSStefan Hajnoczi 
2690047be4edSStefan Hajnoczi     invalidate_and_set_dirty(mr, addr, size);
2691047be4edSStefan Hajnoczi }
2692047be4edSStefan Hajnoczi 
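/*
 * Clamp an access of length @l at @addr to what the region's ops and the
 * address alignment allow; the result is a power of two no larger than @l.
 */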
26933123f93dSJagannathan Raman int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
269482f2563fSPaolo Bonzini {
2695e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
269623326164SRichard Henderson 
269723326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
269823326164SRichard Henderson        otherwise specified.  */
269923326164SRichard Henderson     if (access_size_max == 0) {
270023326164SRichard Henderson         access_size_max = 4;
270182f2563fSPaolo Bonzini     }
270223326164SRichard Henderson 
270323326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
270423326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
270523326164SRichard Henderson         unsigned align_size_max = addr & -addr;
270623326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
270723326164SRichard Henderson             access_size_max = align_size_max;
270823326164SRichard Henderson         }
270923326164SRichard Henderson     }
271023326164SRichard Henderson 
271123326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
271223326164SRichard Henderson     if (l > access_size_max) {
271323326164SRichard Henderson         l = access_size_max;
271423326164SRichard Henderson     }
27156554f5c0SPeter Maydell     l = pow2floor(l);
271623326164SRichard Henderson 
271723326164SRichard Henderson     return l;
271882f2563fSPaolo Bonzini }
271982f2563fSPaolo Bonzini 
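/*
 * Take the BQL if it is not already held and flush any coalesced MMIO.
 * Returns true if the caller must drop the BQL again after the access.
 */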
27203123f93dSJagannathan Raman bool prepare_mmio_access(MemoryRegion *mr)
2721125b3806SPaolo Bonzini {
27224840f10eSJan Kiszka     bool release_lock = false;
27234840f10eSJan Kiszka 
2724195801d7SStefan Hajnoczi     if (!bql_locked()) {
2725195801d7SStefan Hajnoczi         bql_lock();
27264840f10eSJan Kiszka         release_lock = true;
2727125b3806SPaolo Bonzini     }
27284840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
27294840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
27304840f10eSJan Kiszka     }
27314840f10eSJan Kiszka 
27324840f10eSJan Kiszka     return release_lock;
2733125b3806SPaolo Bonzini }
2734125b3806SPaolo Bonzini 
27353ab6fdc9SPhilippe Mathieu-Daudé /**
27363ab6fdc9SPhilippe Mathieu-Daudé  * flatview_access_allowed
27373ab6fdc9SPhilippe Mathieu-Daudé  * @mr: #MemoryRegion to be accessed
27383ab6fdc9SPhilippe Mathieu-Daudé  * @attrs: memory transaction attributes
27393ab6fdc9SPhilippe Mathieu-Daudé  * @addr: address within that memory region
27403ab6fdc9SPhilippe Mathieu-Daudé  * @len: the number of bytes to access
27413ab6fdc9SPhilippe Mathieu-Daudé  *
27423ab6fdc9SPhilippe Mathieu-Daudé  * Check if a memory transaction is allowed.
27433ab6fdc9SPhilippe Mathieu-Daudé  *
27443ab6fdc9SPhilippe Mathieu-Daudé  * Returns: true if transaction is allowed, false if denied.
27453ab6fdc9SPhilippe Mathieu-Daudé  */
27463ab6fdc9SPhilippe Mathieu-Daudé static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
27473ab6fdc9SPhilippe Mathieu-Daudé                                     hwaddr addr, hwaddr len)
27483ab6fdc9SPhilippe Mathieu-Daudé {
27493ab6fdc9SPhilippe Mathieu-Daudé     if (likely(!attrs.memory)) {
27503ab6fdc9SPhilippe Mathieu-Daudé         return true;
27513ab6fdc9SPhilippe Mathieu-Daudé     }
27523ab6fdc9SPhilippe Mathieu-Daudé     if (memory_region_is_ram(mr)) {
27533ab6fdc9SPhilippe Mathieu-Daudé         return true;
27543ab6fdc9SPhilippe Mathieu-Daudé     }
27553ab6fdc9SPhilippe Mathieu-Daudé     qemu_log_mask(LOG_GUEST_ERROR,
27563ab6fdc9SPhilippe Mathieu-Daudé                   "Invalid access to non-RAM device at "
27573ab6fdc9SPhilippe Mathieu-Daudé                   "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
27583ab6fdc9SPhilippe Mathieu-Daudé                   "region '%s'\n", addr, len, memory_region_name(mr));
27593ab6fdc9SPhilippe Mathieu-Daudé     return false;
27603ab6fdc9SPhilippe Mathieu-Daudé }
27613ab6fdc9SPhilippe Mathieu-Daudé 
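/*
 * One step of a flatview write: dispatch to the device model for MMIO
 * regions, or copy into host RAM for directly accessible ones.  *l may be
 * reduced to the length actually handled in this step.
 */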
2762e7927d33SJonathan Cameron static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
2763e7927d33SJonathan Cameron                                                 const uint8_t *buf,
2764e7927d33SJonathan Cameron                                                 hwaddr len, hwaddr mr_addr,
2765e7927d33SJonathan Cameron                                                 hwaddr *l, MemoryRegion *mr)
2766e7927d33SJonathan Cameron {
2767e7927d33SJonathan Cameron     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2768e7927d33SJonathan Cameron         return MEMTX_ACCESS_ERROR;
2769e7927d33SJonathan Cameron     }
2770e7927d33SJonathan Cameron 
2771e7927d33SJonathan Cameron     if (!memory_access_is_direct(mr, true)) {
2772e7927d33SJonathan Cameron         uint64_t val;
2773e7927d33SJonathan Cameron         MemTxResult result;
2774e7927d33SJonathan Cameron         bool release_lock = prepare_mmio_access(mr);
2775e7927d33SJonathan Cameron 
2776e7927d33SJonathan Cameron         *l = memory_access_size(mr, *l, mr_addr);
2777e7927d33SJonathan Cameron         /*
2778e7927d33SJonathan Cameron          * XXX: could force current_cpu to NULL to avoid
2779e7927d33SJonathan Cameron          * potential bugs
2780e7927d33SJonathan Cameron          */
2781e7927d33SJonathan Cameron 
2782e7927d33SJonathan Cameron         /*
2783e7927d33SJonathan Cameron          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2784e7927d33SJonathan Cameron          * the buffer by following ldn_he_p().
2785e7927d33SJonathan Cameron          */
2786e7927d33SJonathan Cameron #ifdef QEMU_STATIC_ANALYSIS
2787e7927d33SJonathan Cameron         assert((*l == 1 && len >= 1) ||
2788e7927d33SJonathan Cameron                (*l == 2 && len >= 2) ||
2789e7927d33SJonathan Cameron                (*l == 4 && len >= 4) ||
2790e7927d33SJonathan Cameron                (*l == 8 && len >= 8));
2791e7927d33SJonathan Cameron #endif
2792e7927d33SJonathan Cameron         val = ldn_he_p(buf, *l);
2793e7927d33SJonathan Cameron         result = memory_region_dispatch_write(mr, mr_addr, val,
2794e7927d33SJonathan Cameron                                               size_memop(*l), attrs);
2795e7927d33SJonathan Cameron         if (release_lock) {
2796e7927d33SJonathan Cameron             bql_unlock();
2797e7927d33SJonathan Cameron         }
2798e7927d33SJonathan Cameron 
2799e7927d33SJonathan Cameron         return result;
2800e7927d33SJonathan Cameron     } else {
2801e7927d33SJonathan Cameron         /* RAM case */
2802e7927d33SJonathan Cameron         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
28035a5585f4SEdgar E. Iglesias                                                false, true);
2804e7927d33SJonathan Cameron 
2805e7927d33SJonathan Cameron         memmove(ram_ptr, buf, *l);
2806e7927d33SJonathan Cameron         invalidate_and_set_dirty(mr, mr_addr, *l);
2807e7927d33SJonathan Cameron 
2808e7927d33SJonathan Cameron         return MEMTX_OK;
2809e7927d33SJonathan Cameron     }
2810e7927d33SJonathan Cameron }
2811e7927d33SJonathan Cameron 
2812a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
281316620684SAlexey Kardashevskiy static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2814a203ac70SPaolo Bonzini                                            MemTxAttrs attrs,
2815a152be43SPhilippe Mathieu-Daudé                                            const void *ptr,
28164c7c8563SJonathan Cameron                                            hwaddr len, hwaddr mr_addr,
2817a203ac70SPaolo Bonzini                                            hwaddr l, MemoryRegion *mr)
281813eb76e0Sbellard {
28193b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
2820a152be43SPhilippe Mathieu-Daudé     const uint8_t *buf = ptr;
282113eb76e0Sbellard 
2822a203ac70SPaolo Bonzini     for (;;) {
2823e7927d33SJonathan Cameron         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
2824e7927d33SJonathan Cameron                                                mr);
2825eb7eeb88SPaolo Bonzini 
2826eb7eeb88SPaolo Bonzini         len -= l;
2827eb7eeb88SPaolo Bonzini         buf += l;
2828eb7eeb88SPaolo Bonzini         addr += l;
2829a203ac70SPaolo Bonzini 
2830a203ac70SPaolo Bonzini         if (!len) {
2831a203ac70SPaolo Bonzini             break;
2832eb7eeb88SPaolo Bonzini         }
2833a203ac70SPaolo Bonzini 
2834a203ac70SPaolo Bonzini         l = len;
28354c7c8563SJonathan Cameron         mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
2836a203ac70SPaolo Bonzini     }
2837eb7eeb88SPaolo Bonzini 
2838eb7eeb88SPaolo Bonzini     return result;
2839eb7eeb88SPaolo Bonzini }
2840eb7eeb88SPaolo Bonzini 
28414c6ebbb3SPaolo Bonzini /* Called from RCU critical section.  */
284216620684SAlexey Kardashevskiy static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2843a152be43SPhilippe Mathieu-Daudé                                   const void *buf, hwaddr len)
2844eb7eeb88SPaolo Bonzini {
2845eb7eeb88SPaolo Bonzini     hwaddr l;
28464c7c8563SJonathan Cameron     hwaddr mr_addr;
2847eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2848a203ac70SPaolo Bonzini 
2849a203ac70SPaolo Bonzini     l = len;
28504c7c8563SJonathan Cameron     mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
28513ab6fdc9SPhilippe Mathieu-Daudé     if (!flatview_access_allowed(mr, attrs, addr, len)) {
28523ab6fdc9SPhilippe Mathieu-Daudé         return MEMTX_ACCESS_ERROR;
28533ab6fdc9SPhilippe Mathieu-Daudé     }
285458e74682SPhilippe Mathieu-Daudé     return flatview_write_continue(fv, addr, attrs, buf, len,
28554c7c8563SJonathan Cameron                                    mr_addr, l, mr);
2856a203ac70SPaolo Bonzini }
2857a203ac70SPaolo Bonzini 
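/*
 * One step of a flatview read: dispatch to the device model for MMIO
 * regions, or copy from host RAM for directly accessible ones.  *l may be
 * reduced to the length actually handled in this step.
 */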
2858e7927d33SJonathan Cameron static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
2859e7927d33SJonathan Cameron                                                hwaddr len, hwaddr mr_addr,
2860e7927d33SJonathan Cameron                                                hwaddr *l,
2861e7927d33SJonathan Cameron                                                MemoryRegion *mr)
2862e7927d33SJonathan Cameron {
2863e7927d33SJonathan Cameron     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2864e7927d33SJonathan Cameron         return MEMTX_ACCESS_ERROR;
2865e7927d33SJonathan Cameron     }
2866e7927d33SJonathan Cameron 
2867e7927d33SJonathan Cameron     if (!memory_access_is_direct(mr, false)) {
2868e7927d33SJonathan Cameron         /* I/O case */
2869e7927d33SJonathan Cameron         uint64_t val;
2870e7927d33SJonathan Cameron         MemTxResult result;
2871e7927d33SJonathan Cameron         bool release_lock = prepare_mmio_access(mr);
2872e7927d33SJonathan Cameron 
2873e7927d33SJonathan Cameron         *l = memory_access_size(mr, *l, mr_addr);
2874e7927d33SJonathan Cameron         result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l),
2875e7927d33SJonathan Cameron                                              attrs);
2876e7927d33SJonathan Cameron 
2877e7927d33SJonathan Cameron         /*
2878e7927d33SJonathan Cameron          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2879e7927d33SJonathan Cameron          * the buffer by following stn_he_p().
2880e7927d33SJonathan Cameron          */
2881e7927d33SJonathan Cameron #ifdef QEMU_STATIC_ANALYSIS
2882e7927d33SJonathan Cameron         assert((*l == 1 && len >= 1) ||
2883e7927d33SJonathan Cameron                (*l == 2 && len >= 2) ||
2884e7927d33SJonathan Cameron                (*l == 4 && len >= 4) ||
2885e7927d33SJonathan Cameron                (*l == 8 && len >= 8));
2886e7927d33SJonathan Cameron #endif
2887e7927d33SJonathan Cameron         stn_he_p(buf, *l, val);
2888e7927d33SJonathan Cameron 
2889e7927d33SJonathan Cameron         if (release_lock) {
2890e7927d33SJonathan Cameron             bql_unlock();
2891e7927d33SJonathan Cameron         }
2892e7927d33SJonathan Cameron         return result;
2893e7927d33SJonathan Cameron     } else {
2894e7927d33SJonathan Cameron         /* RAM case */
2895e7927d33SJonathan Cameron         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
28965a5585f4SEdgar E. Iglesias                                                false, false);
2897e7927d33SJonathan Cameron 
2898e7927d33SJonathan Cameron         memcpy(buf, ram_ptr, *l);
2899e7927d33SJonathan Cameron 
2900e7927d33SJonathan Cameron         return MEMTX_OK;
2901e7927d33SJonathan Cameron     }
2902e7927d33SJonathan Cameron }
2903e7927d33SJonathan Cameron 
2904a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
290516620684SAlexey Kardashevskiy MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2906a152be43SPhilippe Mathieu-Daudé                                    MemTxAttrs attrs, void *ptr,
29074c7c8563SJonathan Cameron                                    hwaddr len, hwaddr mr_addr, hwaddr l,
2908a203ac70SPaolo Bonzini                                    MemoryRegion *mr)
2909a203ac70SPaolo Bonzini {
2910a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2911a152be43SPhilippe Mathieu-Daudé     uint8_t *buf = ptr;
2912eb7eeb88SPaolo Bonzini 
29137cac7feaSAlexander Bulekov     fuzz_dma_read_cb(addr, len, mr);
2914a203ac70SPaolo Bonzini     for (;;) {
2915e7927d33SJonathan Cameron         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
29164840f10eSJan Kiszka 
291713eb76e0Sbellard         len -= l;
291813eb76e0Sbellard         buf += l;
291913eb76e0Sbellard         addr += l;
2920a203ac70SPaolo Bonzini 
2921a203ac70SPaolo Bonzini         if (!len) {
2922a203ac70SPaolo Bonzini             break;
292313eb76e0Sbellard         }
2924a203ac70SPaolo Bonzini 
2925a203ac70SPaolo Bonzini         l = len;
29264c7c8563SJonathan Cameron         mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
2927a203ac70SPaolo Bonzini     }
2928a203ac70SPaolo Bonzini 
2929a203ac70SPaolo Bonzini     return result;
2930a203ac70SPaolo Bonzini }
2931a203ac70SPaolo Bonzini 
2932b2a44fcaSPaolo Bonzini /* Called from RCU critical section.  */
2933b2a44fcaSPaolo Bonzini static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2934a152be43SPhilippe Mathieu-Daudé                                  MemTxAttrs attrs, void *buf, hwaddr len)
2935a203ac70SPaolo Bonzini {
2936a203ac70SPaolo Bonzini     hwaddr l;
29374c7c8563SJonathan Cameron     hwaddr mr_addr;
2938a203ac70SPaolo Bonzini     MemoryRegion *mr;
2939a203ac70SPaolo Bonzini 
2940a203ac70SPaolo Bonzini     l = len;
29414c7c8563SJonathan Cameron     mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
29423ab6fdc9SPhilippe Mathieu-Daudé     if (!flatview_access_allowed(mr, attrs, addr, len)) {
29433ab6fdc9SPhilippe Mathieu-Daudé         return MEMTX_ACCESS_ERROR;
29443ab6fdc9SPhilippe Mathieu-Daudé     }
2945b2a44fcaSPaolo Bonzini     return flatview_read_continue(fv, addr, attrs, buf, len,
29464c7c8563SJonathan Cameron                                   mr_addr, l, mr);
294713eb76e0Sbellard }
29488df1cd07Sbellard 
2949b2a44fcaSPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2950daa3dda4SPhilippe Mathieu-Daudé                                     MemTxAttrs attrs, void *buf, hwaddr len)
2951b2a44fcaSPaolo Bonzini {
2952b2a44fcaSPaolo Bonzini     MemTxResult result = MEMTX_OK;
2953b2a44fcaSPaolo Bonzini     FlatView *fv;
2954b2a44fcaSPaolo Bonzini 
2955b2a44fcaSPaolo Bonzini     if (len > 0) {
2956694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
2957b2a44fcaSPaolo Bonzini         fv = address_space_to_flatview(as);
2958b2a44fcaSPaolo Bonzini         result = flatview_read(fv, addr, attrs, buf, len);
2959b2a44fcaSPaolo Bonzini     }
2960b2a44fcaSPaolo Bonzini 
2961b2a44fcaSPaolo Bonzini     return result;
2962b2a44fcaSPaolo Bonzini }
2963b2a44fcaSPaolo Bonzini 
29644c6ebbb3SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
29654c6ebbb3SPaolo Bonzini                                 MemTxAttrs attrs,
2966daa3dda4SPhilippe Mathieu-Daudé                                 const void *buf, hwaddr len)
29674c6ebbb3SPaolo Bonzini {
29684c6ebbb3SPaolo Bonzini     MemTxResult result = MEMTX_OK;
29694c6ebbb3SPaolo Bonzini     FlatView *fv;
29704c6ebbb3SPaolo Bonzini 
29714c6ebbb3SPaolo Bonzini     if (len > 0) {
2972694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
29734c6ebbb3SPaolo Bonzini         fv = address_space_to_flatview(as);
29744c6ebbb3SPaolo Bonzini         result = flatview_write(fv, addr, attrs, buf, len);
29754c6ebbb3SPaolo Bonzini     }
29764c6ebbb3SPaolo Bonzini 
29774c6ebbb3SPaolo Bonzini     return result;
29784c6ebbb3SPaolo Bonzini }
29794c6ebbb3SPaolo Bonzini 
2980db84fd97SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2981daa3dda4SPhilippe Mathieu-Daudé                              void *buf, hwaddr len, bool is_write)
2982db84fd97SPaolo Bonzini {
2983db84fd97SPaolo Bonzini     if (is_write) {
2984db84fd97SPaolo Bonzini         return address_space_write(as, addr, attrs, buf, len);
2985db84fd97SPaolo Bonzini     } else {
2986db84fd97SPaolo Bonzini         return address_space_read_full(as, addr, attrs, buf, len);
2987db84fd97SPaolo Bonzini     }
2988db84fd97SPaolo Bonzini }
2989db84fd97SPaolo Bonzini 
299075f01c68SPhilippe Mathieu-Daudé MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
299175f01c68SPhilippe Mathieu-Daudé                               uint8_t c, hwaddr len, MemTxAttrs attrs)
299275f01c68SPhilippe Mathieu-Daudé {
299375f01c68SPhilippe Mathieu-Daudé #define FILLBUF_SIZE 512
299475f01c68SPhilippe Mathieu-Daudé     uint8_t fillbuf[FILLBUF_SIZE];
299575f01c68SPhilippe Mathieu-Daudé     int l;
299675f01c68SPhilippe Mathieu-Daudé     MemTxResult error = MEMTX_OK;
299775f01c68SPhilippe Mathieu-Daudé 
299875f01c68SPhilippe Mathieu-Daudé     memset(fillbuf, c, FILLBUF_SIZE);
299975f01c68SPhilippe Mathieu-Daudé     while (len > 0) {
300075f01c68SPhilippe Mathieu-Daudé         l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
300175f01c68SPhilippe Mathieu-Daudé         error |= address_space_write(as, addr, attrs, fillbuf, l);
300275f01c68SPhilippe Mathieu-Daudé         len -= l;
300375f01c68SPhilippe Mathieu-Daudé         addr += l;
300475f01c68SPhilippe Mathieu-Daudé     }
300575f01c68SPhilippe Mathieu-Daudé 
300675f01c68SPhilippe Mathieu-Daudé     return error;
300775f01c68SPhilippe Mathieu-Daudé }
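
/*
 * Illustrative use (sketch; 'as', 'addr' and 'len' stand for whatever the
 * caller has at hand): clearing a guest-physical range to zero:
 *
 *     MemTxResult r = address_space_set(as, addr, 0, len,
 *                                       MEMTXATTRS_UNSPECIFIED);
 *
 * The returned value is the OR of the per-chunk write results, so any
 * failing chunk makes the whole call report an error.
 */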
300875f01c68SPhilippe Mathieu-Daudé 
3009d7ef71efSPhilippe Mathieu-Daudé void cpu_physical_memory_rw(hwaddr addr, void *buf,
301028c80bfeSPhilippe Mathieu-Daudé                             hwaddr len, bool is_write)
3011ac1970fbSAvi Kivity {
30125c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
30135c9eb028SPeter Maydell                      buf, len, is_write);
3014ac1970fbSAvi Kivity }
3015ac1970fbSAvi Kivity 
3016582b55a9SAlexander Graf enum write_rom_type {
3017582b55a9SAlexander Graf     WRITE_DATA,
3018582b55a9SAlexander Graf     FLUSH_CACHE,
3019582b55a9SAlexander Graf };
3020582b55a9SAlexander Graf 
302175693e14SPeter Maydell static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
302275693e14SPeter Maydell                                                            hwaddr addr,
302375693e14SPeter Maydell                                                            MemTxAttrs attrs,
3024daa3dda4SPhilippe Mathieu-Daudé                                                            const void *ptr,
30250c249ff7SLi Zhijian                                                            hwaddr len,
302675693e14SPeter Maydell                                                            enum write_rom_type type)
3027d0ecd2aaSbellard {
3028149f54b5SPaolo Bonzini     hwaddr l;
302920804676SPhilippe Mathieu-Daudé     uint8_t *ram_ptr;
3030149f54b5SPaolo Bonzini     hwaddr addr1;
30315c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3032daa3dda4SPhilippe Mathieu-Daudé     const uint8_t *buf = ptr;
3033d0ecd2aaSbellard 
3034694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
3035d0ecd2aaSbellard     while (len > 0) {
3036d0ecd2aaSbellard         l = len;
303775693e14SPeter Maydell         mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
3038d0ecd2aaSbellard 
30395c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
30405c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
3041b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
3042d0ecd2aaSbellard         } else {
3043d0ecd2aaSbellard             /* ROM/RAM case */
304420804676SPhilippe Mathieu-Daudé             ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3045582b55a9SAlexander Graf             switch (type) {
3046582b55a9SAlexander Graf             case WRITE_DATA:
304720804676SPhilippe Mathieu-Daudé                 memcpy(ram_ptr, buf, l);
3048845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
3049582b55a9SAlexander Graf                 break;
3050582b55a9SAlexander Graf             case FLUSH_CACHE:
30511da8de39SRichard Henderson                 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
3052582b55a9SAlexander Graf                 break;
3053582b55a9SAlexander Graf             }
3054d0ecd2aaSbellard         }
3055d0ecd2aaSbellard         len -= l;
3056d0ecd2aaSbellard         buf += l;
3057d0ecd2aaSbellard         addr += l;
3058d0ecd2aaSbellard     }
305975693e14SPeter Maydell     return MEMTX_OK;
3060d0ecd2aaSbellard }
3061d0ecd2aaSbellard 
3062582b55a9SAlexander Graf /* used for ROM loading: can write in RAM and ROM */
30633c8133f9SPeter Maydell MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
30643c8133f9SPeter Maydell                                     MemTxAttrs attrs,
3065daa3dda4SPhilippe Mathieu-Daudé                                     const void *buf, hwaddr len)
3066582b55a9SAlexander Graf {
30673c8133f9SPeter Maydell     return address_space_write_rom_internal(as, addr, attrs,
306875693e14SPeter Maydell                                             buf, len, WRITE_DATA);
3069582b55a9SAlexander Graf }
3070582b55a9SAlexander Graf 
30710c249ff7SLi Zhijian void cpu_flush_icache_range(hwaddr start, hwaddr len)
3072582b55a9SAlexander Graf {
3073582b55a9SAlexander Graf     /*
3074582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
3075582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
3076582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
3077582b55a9SAlexander Graf      * the host's instruction cache at least.
3078582b55a9SAlexander Graf      */
3079582b55a9SAlexander Graf     if (tcg_enabled()) {
3080582b55a9SAlexander Graf         return;
3081582b55a9SAlexander Graf     }
3082582b55a9SAlexander Graf 
308375693e14SPeter Maydell     address_space_write_rom_internal(&address_space_memory,
308475693e14SPeter Maydell                                      start, MEMTXATTRS_UNSPECIFIED,
308575693e14SPeter Maydell                                      NULL, len, FLUSH_CACHE);
3086582b55a9SAlexander Graf }
3087582b55a9SAlexander Graf 
308869e78f1bSMattias Nissler static void
308969e78f1bSMattias Nissler address_space_unregister_map_client_do(AddressSpaceMapClient *client)
3090ba223c29Saliguori {
309172cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
30927267c094SAnthony Liguori     g_free(client);
3093ba223c29Saliguori }
3094ba223c29Saliguori 
30955c627197SMattias Nissler static void address_space_notify_map_clients_locked(AddressSpace *as)
3096ba223c29Saliguori {
309769e78f1bSMattias Nissler     AddressSpaceMapClient *client;
3098ba223c29Saliguori 
309969e78f1bSMattias Nissler     while (!QLIST_EMPTY(&as->map_client_list)) {
310069e78f1bSMattias Nissler         client = QLIST_FIRST(&as->map_client_list);
3101e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
31025c627197SMattias Nissler         address_space_unregister_map_client_do(client);
3103ba223c29Saliguori     }
3104ba223c29Saliguori }
3105ba223c29Saliguori 
31065c627197SMattias Nissler void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
3107d0ecd2aaSbellard {
310869e78f1bSMattias Nissler     AddressSpaceMapClient *client = g_malloc(sizeof(*client));
3109d0ecd2aaSbellard 
311069e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3111e95205e1SFam Zheng     client->bh = bh;
311269e78f1bSMattias Nissler     QLIST_INSERT_HEAD(&as->map_client_list, client, link);
311333828ca1SPaolo Bonzini     /* Write map_client_list before reading in_use.  */
311433828ca1SPaolo Bonzini     smp_mb();
311569e78f1bSMattias Nissler     if (!qatomic_read(&as->bounce.in_use)) {
31165c627197SMattias Nissler         address_space_notify_map_clients_locked(as);
311733b6c2edSFam Zheng     }
3118d0ecd2aaSbellard }
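
/*
 * Illustrative retry protocol (sketch; my_retry_bh and MyState are
 * hypothetical): a caller whose address_space_map() returned NULL because
 * the bounce buffer was busy can register a bottom half and retry there:
 *
 *     static void my_retry_bh(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // retry address_space_map(); the client entry has already been
 *         // removed when the bottom half was scheduled
 *     }
 *
 *     s->bh = qemu_bh_new(my_retry_bh, s);
 *     address_space_register_map_client(s->as, s->bh);
 *
 * The bottom half is scheduled from address_space_unmap() once the bounce
 * buffer is released (or immediately, if it is already free).
 */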
3119d0ecd2aaSbellard 
312038e047b5SFam Zheng void cpu_exec_init_all(void)
312138e047b5SFam Zheng {
312238e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
312320bccb82SPeter Maydell     /* The data structures we set up here depend on knowing the page size,
312420bccb82SPeter Maydell      * so no more changes can be made after this point.
312520bccb82SPeter Maydell      * In an ideal world, nothing we did before we had finished the
312620bccb82SPeter Maydell      * machine setup would care about the target page size, and we could
312720bccb82SPeter Maydell      * do this much later, rather than requiring board models to state
312820bccb82SPeter Maydell      * up front what their requirements are.
312920bccb82SPeter Maydell      */
313020bccb82SPeter Maydell     finalize_target_page_bits();
313138e047b5SFam Zheng     io_mem_init();
3132680a4783SPaolo Bonzini     memory_map_init();
313338e047b5SFam Zheng }
313438e047b5SFam Zheng 
31355c627197SMattias Nissler void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
3136d0ecd2aaSbellard {
313769e78f1bSMattias Nissler     AddressSpaceMapClient *client;
3138d0ecd2aaSbellard 
313969e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
314069e78f1bSMattias Nissler     QLIST_FOREACH(client, &as->map_client_list, link) {
3141e95205e1SFam Zheng         if (client->bh == bh) {
31425c627197SMattias Nissler             address_space_unregister_map_client_do(client);
3143e95205e1SFam Zheng             break;
3144e95205e1SFam Zheng         }
3145e95205e1SFam Zheng     }
3146d0ecd2aaSbellard }
3147d0ecd2aaSbellard 
31485c627197SMattias Nissler static void address_space_notify_map_clients(AddressSpace *as)
3149d0ecd2aaSbellard {
315069e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
31515c627197SMattias Nissler     address_space_notify_map_clients_locked(as);
31526d16c2f8Saliguori }
31536d16c2f8Saliguori 
31540c249ff7SLi Zhijian static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
3155eace72b7SPeter Maydell                                   bool is_write, MemTxAttrs attrs)
315651644ab7SPaolo Bonzini {
31575c8a00ceSPaolo Bonzini     MemoryRegion *mr;
315851644ab7SPaolo Bonzini     hwaddr l, xlat;
315951644ab7SPaolo Bonzini 
316051644ab7SPaolo Bonzini     while (len > 0) {
316151644ab7SPaolo Bonzini         l = len;
3162efa99a2fSPeter Maydell         mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
31635c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
31645c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
3165eace72b7SPeter Maydell             if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
316651644ab7SPaolo Bonzini                 return false;
316751644ab7SPaolo Bonzini             }
316851644ab7SPaolo Bonzini         }
316951644ab7SPaolo Bonzini 
317051644ab7SPaolo Bonzini         len -= l;
317151644ab7SPaolo Bonzini         addr += l;
317251644ab7SPaolo Bonzini     }
317351644ab7SPaolo Bonzini     return true;
317451644ab7SPaolo Bonzini }
317551644ab7SPaolo Bonzini 
317616620684SAlexey Kardashevskiy bool address_space_access_valid(AddressSpace *as, hwaddr addr,
31770c249ff7SLi Zhijian                                 hwaddr len, bool is_write,
3178fddffa42SPeter Maydell                                 MemTxAttrs attrs)
317916620684SAlexey Kardashevskiy {
318011e732a5SPaolo Bonzini     FlatView *fv;
318111e732a5SPaolo Bonzini 
3182694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
318311e732a5SPaolo Bonzini     fv = address_space_to_flatview(as);
318458e74682SPhilippe Mathieu-Daudé     return flatview_access_valid(fv, addr, len, is_write, attrs);
318516620684SAlexey Kardashevskiy }
318616620684SAlexey Kardashevskiy 
3187715c31ecSPaolo Bonzini static hwaddr
318816620684SAlexey Kardashevskiy flatview_extend_translation(FlatView *fv, hwaddr addr,
318916620684SAlexey Kardashevskiy                             hwaddr target_len,
3190715c31ecSPaolo Bonzini                             MemoryRegion *mr, hwaddr base, hwaddr len,
319153d0790dSPeter Maydell                             bool is_write, MemTxAttrs attrs)
3192715c31ecSPaolo Bonzini {
3193715c31ecSPaolo Bonzini     hwaddr done = 0;
3194715c31ecSPaolo Bonzini     hwaddr xlat;
3195715c31ecSPaolo Bonzini     MemoryRegion *this_mr;
3196715c31ecSPaolo Bonzini 
3197715c31ecSPaolo Bonzini     for (;;) {
3198715c31ecSPaolo Bonzini         target_len -= len;
3199715c31ecSPaolo Bonzini         addr += len;
3200715c31ecSPaolo Bonzini         done += len;
3201715c31ecSPaolo Bonzini         if (target_len == 0) {
3202715c31ecSPaolo Bonzini             return done;
3203715c31ecSPaolo Bonzini         }
3204715c31ecSPaolo Bonzini 
3205715c31ecSPaolo Bonzini         len = target_len;
320616620684SAlexey Kardashevskiy         this_mr = flatview_translate(fv, addr, &xlat,
3207efa99a2fSPeter Maydell                                      &len, is_write, attrs);
3208715c31ecSPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
3209715c31ecSPaolo Bonzini             return done;
3210715c31ecSPaolo Bonzini         }
3211715c31ecSPaolo Bonzini     }
3212715c31ecSPaolo Bonzini }
3213715c31ecSPaolo Bonzini 
32146d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
32156d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
32166d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
32176d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
32185c627197SMattias Nissler  * Use address_space_register_map_client() to know when retrying the map
32195c627197SMattias Nissler  * operation is likely to succeed.
32206d16c2f8Saliguori  */
3221ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
3222a8170e5eSAvi Kivity                         hwaddr addr,
3223a8170e5eSAvi Kivity                         hwaddr *plen,
3224f26404fbSPeter Maydell                         bool is_write,
3225f26404fbSPeter Maydell                         MemTxAttrs attrs)
32266d16c2f8Saliguori {
3227a8170e5eSAvi Kivity     hwaddr len = *plen;
3228715c31ecSPaolo Bonzini     hwaddr l, xlat;
3229715c31ecSPaolo Bonzini     MemoryRegion *mr;
3230ad0c60faSPaolo Bonzini     FlatView *fv;
32316d16c2f8Saliguori 
3232d44fe13bSAlex Bennée     trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs);
3233d44fe13bSAlex Bennée 
3234e3127ae0SPaolo Bonzini     if (len == 0) {
3235e3127ae0SPaolo Bonzini         return NULL;
3236e3127ae0SPaolo Bonzini     }
3237e3127ae0SPaolo Bonzini 
32386d16c2f8Saliguori     l = len;
3239694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
3240ad0c60faSPaolo Bonzini     fv = address_space_to_flatview(as);
3241efa99a2fSPeter Maydell     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
324241063e1eSPaolo Bonzini 
32435c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
324469e78f1bSMattias Nissler         if (qatomic_xchg(&as->bounce.in_use, true)) {
324577f55eacSPrasad J Pandit             *plen = 0;
3246e3127ae0SPaolo Bonzini             return NULL;
32476d16c2f8Saliguori         }
3248e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
3249e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
325069e78f1bSMattias Nissler         as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
325169e78f1bSMattias Nissler         as->bounce.addr = addr;
325269e78f1bSMattias Nissler         as->bounce.len = l;
3253d3e71559SPaolo Bonzini 
3254d3e71559SPaolo Bonzini         memory_region_ref(mr);
325569e78f1bSMattias Nissler         as->bounce.mr = mr;
32566d16c2f8Saliguori         if (!is_write) {
325716620684SAlexey Kardashevskiy             flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
325869e78f1bSMattias Nissler                           as->bounce.buffer, l);
32596d16c2f8Saliguori         }
326038bee5dcSStefano Stabellini 
326138bee5dcSStefano Stabellini         *plen = l;
326269e78f1bSMattias Nissler         return as->bounce.buffer;
32636d16c2f8Saliguori     }
3264e3127ae0SPaolo Bonzini 
32656d16c2f8Saliguori 
3266d3e71559SPaolo Bonzini     memory_region_ref(mr);
326716620684SAlexey Kardashevskiy     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
326853d0790dSPeter Maydell                                         l, is_write, attrs);
3269fc1c8344SAlexander Bulekov     fuzz_dma_read_cb(addr, *plen, mr);
32705a5585f4SEdgar E. Iglesias     return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write);
32716d16c2f8Saliguori }
32726d16c2f8Saliguori 
3273ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
3274ae5883abSPhilippe Mathieu-Daudé  * Will also mark the memory as dirty if is_write is true.  access_len gives
32756d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
32766d16c2f8Saliguori  */
3277a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3278ae5883abSPhilippe Mathieu-Daudé                          bool is_write, hwaddr access_len)
32796d16c2f8Saliguori {
328069e78f1bSMattias Nissler     if (buffer != as->bounce.buffer) {
3281d3e71559SPaolo Bonzini         MemoryRegion *mr;
32827443b437SPaolo Bonzini         ram_addr_t addr1;
3283d3e71559SPaolo Bonzini 
328407bdaa41SPaolo Bonzini         mr = memory_region_from_host(buffer, &addr1);
32851b5ec234SPaolo Bonzini         assert(mr != NULL);
3286d3e71559SPaolo Bonzini         if (is_write) {
3287845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, access_len);
32886d16c2f8Saliguori         }
3289868bb33fSJan Kiszka         if (xen_enabled()) {
3290e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
3291050a0ddfSAnthony PERARD         }
3292d3e71559SPaolo Bonzini         memory_region_unref(mr);
32936d16c2f8Saliguori         return;
32946d16c2f8Saliguori     }
32956d16c2f8Saliguori     if (is_write) {
329669e78f1bSMattias Nissler         address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED,
329769e78f1bSMattias Nissler                             as->bounce.buffer, access_len);
32986d16c2f8Saliguori     }
329969e78f1bSMattias Nissler     qemu_vfree(as->bounce.buffer);
330069e78f1bSMattias Nissler     as->bounce.buffer = NULL;
330169e78f1bSMattias Nissler     memory_region_unref(as->bounce.mr);
330233828ca1SPaolo Bonzini     /* Clear in_use before reading map_client_list.  */
330369e78f1bSMattias Nissler     qatomic_set_mb(&as->bounce.in_use, false);
33045c627197SMattias Nissler     address_space_notify_map_clients(as);
33056d16c2f8Saliguori }
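
/*
 * Illustrative use (sketch; 'as', 'gpa', 'size' and process() are
 * hypothetical): a bounded read of guest memory through map/unmap:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, false,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         process(p, plen);   // only 'plen' bytes are valid; may be < size
 *         address_space_unmap(as, p, plen, false, plen);
 *     }
 *     // on NULL, give up or use address_space_register_map_client() to be
 *     // told when retrying is likely to succeed
 */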
3306d0ecd2aaSbellard 
3307a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
3308a8170e5eSAvi Kivity                               hwaddr *plen,
330928c80bfeSPhilippe Mathieu-Daudé                               bool is_write)
3310ac1970fbSAvi Kivity {
3311f26404fbSPeter Maydell     return address_space_map(&address_space_memory, addr, plen, is_write,
3312f26404fbSPeter Maydell                              MEMTXATTRS_UNSPECIFIED);
3313ac1970fbSAvi Kivity }
3314ac1970fbSAvi Kivity 
3315a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
331628c80bfeSPhilippe Mathieu-Daudé                                bool is_write, hwaddr access_len)
3317ac1970fbSAvi Kivity {
3318ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3319ac1970fbSAvi Kivity }
3320ac1970fbSAvi Kivity 
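/*
 * The macros below parametrize memory_ldst.c.inc so that it emits the
 * address_space_ld*() / address_space_st*() helpers for a plain
 * AddressSpace argument (empty SUFFIX), translating through
 * address_space_translate() under the RCU read lock.
 */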
33210ce265ffSPaolo Bonzini #define ARG1_DECL                AddressSpace *as
33220ce265ffSPaolo Bonzini #define ARG1                     as
33230ce265ffSPaolo Bonzini #define SUFFIX
33240ce265ffSPaolo Bonzini #define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
33250ce265ffSPaolo Bonzini #define RCU_READ_LOCK(...)       rcu_read_lock()
33260ce265ffSPaolo Bonzini #define RCU_READ_UNLOCK(...)     rcu_read_unlock()
3327139c1837SPaolo Bonzini #include "memory_ldst.c.inc"
33281e78bcc1SAlexander Graf 
33291f4e496eSPaolo Bonzini int64_t address_space_cache_init(MemoryRegionCache *cache,
33301f4e496eSPaolo Bonzini                                  AddressSpace *as,
33311f4e496eSPaolo Bonzini                                  hwaddr addr,
33321f4e496eSPaolo Bonzini                                  hwaddr len,
33331f4e496eSPaolo Bonzini                                  bool is_write)
33341f4e496eSPaolo Bonzini {
333548564041SPaolo Bonzini     AddressSpaceDispatch *d;
333648564041SPaolo Bonzini     hwaddr l;
333748564041SPaolo Bonzini     MemoryRegion *mr;
33384bfb024bSPaolo Bonzini     Int128 diff;
333948564041SPaolo Bonzini 
334048564041SPaolo Bonzini     assert(len > 0);
334148564041SPaolo Bonzini 
334248564041SPaolo Bonzini     l = len;
334348564041SPaolo Bonzini     cache->fv = address_space_get_flatview(as);
334448564041SPaolo Bonzini     d = flatview_to_dispatch(cache->fv);
334548564041SPaolo Bonzini     cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
334648564041SPaolo Bonzini 
33474bfb024bSPaolo Bonzini     /*
33484bfb024bSPaolo Bonzini      * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
33494bfb024bSPaolo Bonzini      * Take that into account to compute how many bytes are there between
33504bfb024bSPaolo Bonzini      * cache->xlat and the end of the section.
33514bfb024bSPaolo Bonzini      */
33524bfb024bSPaolo Bonzini     diff = int128_sub(cache->mrs.size,
33534bfb024bSPaolo Bonzini                       int128_make64(cache->xlat - cache->mrs.offset_within_region));
33544bfb024bSPaolo Bonzini     l = int128_get64(int128_min(diff, int128_make64(l)));
33554bfb024bSPaolo Bonzini 
335648564041SPaolo Bonzini     mr = cache->mrs.mr;
335748564041SPaolo Bonzini     memory_region_ref(mr);
335848564041SPaolo Bonzini     if (memory_access_is_direct(mr, is_write)) {
335953d0790dSPeter Maydell         /* We don't care about the memory attributes here as we're only
336053d0790dSPeter Maydell          * doing this if we found actual RAM, which behaves the same
336153d0790dSPeter Maydell          * regardless of attributes; so UNSPECIFIED is fine.
336253d0790dSPeter Maydell          */
336348564041SPaolo Bonzini         l = flatview_extend_translation(cache->fv, addr, len, mr,
336453d0790dSPeter Maydell                                         cache->xlat, l, is_write,
336553d0790dSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
33665a5585f4SEdgar E. Iglesias         cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true,
33675a5585f4SEdgar E. Iglesias                                          is_write);
336848564041SPaolo Bonzini     } else {
336948564041SPaolo Bonzini         cache->ptr = NULL;
337048564041SPaolo Bonzini     }
337148564041SPaolo Bonzini 
337248564041SPaolo Bonzini     cache->len = l;
337348564041SPaolo Bonzini     cache->is_write = is_write;
337448564041SPaolo Bonzini     return l;
33751f4e496eSPaolo Bonzini }
33761f4e496eSPaolo Bonzini 
33771f4e496eSPaolo Bonzini void address_space_cache_invalidate(MemoryRegionCache *cache,
33781f4e496eSPaolo Bonzini                                     hwaddr addr,
33791f4e496eSPaolo Bonzini                                     hwaddr access_len)
33801f4e496eSPaolo Bonzini {
338148564041SPaolo Bonzini     assert(cache->is_write);
338248564041SPaolo Bonzini     if (likely(cache->ptr)) {
338348564041SPaolo Bonzini         invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
338448564041SPaolo Bonzini     }
33851f4e496eSPaolo Bonzini }
33861f4e496eSPaolo Bonzini 
33871f4e496eSPaolo Bonzini void address_space_cache_destroy(MemoryRegionCache *cache)
33881f4e496eSPaolo Bonzini {
338948564041SPaolo Bonzini     if (!cache->mrs.mr) {
339048564041SPaolo Bonzini         return;
339148564041SPaolo Bonzini     }
339248564041SPaolo Bonzini 
339348564041SPaolo Bonzini     if (xen_enabled()) {
339448564041SPaolo Bonzini         xen_invalidate_map_cache_entry(cache->ptr);
339548564041SPaolo Bonzini     }
339648564041SPaolo Bonzini     memory_region_unref(cache->mrs.mr);
339748564041SPaolo Bonzini     flatview_unref(cache->fv);
339848564041SPaolo Bonzini     cache->mrs.mr = NULL;
339948564041SPaolo Bonzini     cache->fv = NULL;
340048564041SPaolo Bonzini }
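
/*
 * Illustrative use (sketch; 'as' and 'desc_gpa' are hypothetical): cache a
 * small, frequently accessed guest-physical window such as a descriptor:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t n = address_space_cache_init(&cache, as, desc_gpa, 16, true);
 *     if (n >= 16) {
 *         uint32_t flags = address_space_ldl_le_cached(&cache, 0,
 *                                            MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_stl_le_cached(&cache, 8, flags | 1,
 *                                     MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_cache_invalidate(&cache, 8, 4);
 *     }
 *     address_space_cache_destroy(&cache);
 */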
340148564041SPaolo Bonzini 
340248564041SPaolo Bonzini /* Called from RCU critical section.  This function has the same
340348564041SPaolo Bonzini  * semantics as address_space_translate, but it only works on a
340448564041SPaolo Bonzini  * predefined range of a MemoryRegion that was mapped with
340548564041SPaolo Bonzini  * address_space_cache_init.
340648564041SPaolo Bonzini  */
340748564041SPaolo Bonzini static inline MemoryRegion *address_space_translate_cached(
340848564041SPaolo Bonzini     MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
3409bc6b1cecSPeter Maydell     hwaddr *plen, bool is_write, MemTxAttrs attrs)
341048564041SPaolo Bonzini {
341148564041SPaolo Bonzini     MemoryRegionSection section;
341248564041SPaolo Bonzini     MemoryRegion *mr;
341348564041SPaolo Bonzini     IOMMUMemoryRegion *iommu_mr;
341448564041SPaolo Bonzini     AddressSpace *target_as;
341548564041SPaolo Bonzini 
341648564041SPaolo Bonzini     assert(!cache->ptr);
341748564041SPaolo Bonzini     *xlat = addr + cache->xlat;
341848564041SPaolo Bonzini 
341948564041SPaolo Bonzini     mr = cache->mrs.mr;
342048564041SPaolo Bonzini     iommu_mr = memory_region_get_iommu(mr);
342148564041SPaolo Bonzini     if (!iommu_mr) {
342248564041SPaolo Bonzini         /* MMIO region.  */
342348564041SPaolo Bonzini         return mr;
342448564041SPaolo Bonzini     }
342548564041SPaolo Bonzini 
342648564041SPaolo Bonzini     section = address_space_translate_iommu(iommu_mr, xlat, plen,
342748564041SPaolo Bonzini                                             NULL, is_write, true,
34282f7b009cSPeter Maydell                                             &target_as, attrs);
342948564041SPaolo Bonzini     return section.mr;
343048564041SPaolo Bonzini }
343148564041SPaolo Bonzini 
343247293c92SJonathan Cameron /* Called within RCU critical section.  */
343347293c92SJonathan Cameron static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs,
343447293c92SJonathan Cameron                                                        const void *ptr,
343547293c92SJonathan Cameron                                                        hwaddr len,
343647293c92SJonathan Cameron                                                        hwaddr mr_addr,
343747293c92SJonathan Cameron                                                        hwaddr l,
343847293c92SJonathan Cameron                                                        MemoryRegion *mr)
343947293c92SJonathan Cameron {
344047293c92SJonathan Cameron     MemTxResult result = MEMTX_OK;
344147293c92SJonathan Cameron     const uint8_t *buf = ptr;
344247293c92SJonathan Cameron 
344347293c92SJonathan Cameron     for (;;) {
344447293c92SJonathan Cameron         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
344547293c92SJonathan Cameron                                                mr);
344647293c92SJonathan Cameron 
344747293c92SJonathan Cameron         len -= l;
344847293c92SJonathan Cameron         buf += l;
344947293c92SJonathan Cameron         mr_addr += l;
345047293c92SJonathan Cameron 
345147293c92SJonathan Cameron         if (!len) {
345247293c92SJonathan Cameron             break;
345347293c92SJonathan Cameron         }
345447293c92SJonathan Cameron 
345547293c92SJonathan Cameron         l = len;
345647293c92SJonathan Cameron     }
345747293c92SJonathan Cameron 
345847293c92SJonathan Cameron     return result;
345947293c92SJonathan Cameron }
346047293c92SJonathan Cameron 
346147293c92SJonathan Cameron /* Called within RCU critical section.  */
346247293c92SJonathan Cameron static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs,
346347293c92SJonathan Cameron                                                       void *ptr, hwaddr len,
346447293c92SJonathan Cameron                                                       hwaddr mr_addr, hwaddr l,
346547293c92SJonathan Cameron                                                       MemoryRegion *mr)
346647293c92SJonathan Cameron {
346747293c92SJonathan Cameron     MemTxResult result = MEMTX_OK;
346847293c92SJonathan Cameron     uint8_t *buf = ptr;
346947293c92SJonathan Cameron 
347047293c92SJonathan Cameron     for (;;) {
347147293c92SJonathan Cameron         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
347247293c92SJonathan Cameron         len -= l;
347347293c92SJonathan Cameron         buf += l;
347447293c92SJonathan Cameron         mr_addr += l;
347547293c92SJonathan Cameron 
347647293c92SJonathan Cameron         if (!len) {
347747293c92SJonathan Cameron             break;
347847293c92SJonathan Cameron         }
347947293c92SJonathan Cameron         l = len;
348047293c92SJonathan Cameron     }
348147293c92SJonathan Cameron 
348247293c92SJonathan Cameron     return result;
348347293c92SJonathan Cameron }
348447293c92SJonathan Cameron 
348548564041SPaolo Bonzini /* Called from RCU critical section. address_space_read_cached uses this
348648564041SPaolo Bonzini  * out of line function when the target is an MMIO or IOMMU region.
348748564041SPaolo Bonzini  */
348838df19faSPhilippe Mathieu-Daudé MemTxResult
348948564041SPaolo Bonzini address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
34900c249ff7SLi Zhijian                                    void *buf, hwaddr len)
349148564041SPaolo Bonzini {
34924c7c8563SJonathan Cameron     hwaddr mr_addr, l;
349348564041SPaolo Bonzini     MemoryRegion *mr;
349448564041SPaolo Bonzini 
349548564041SPaolo Bonzini     l = len;
34964c7c8563SJonathan Cameron     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false,
3497bc6b1cecSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
349847293c92SJonathan Cameron     return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED,
349947293c92SJonathan Cameron                                               buf, len, mr_addr, l, mr);
350048564041SPaolo Bonzini }
350148564041SPaolo Bonzini 
350248564041SPaolo Bonzini /* Called from RCU critical section. address_space_write_cached uses this
350348564041SPaolo Bonzini  * out of line function when the target is an MMIO or IOMMU region.
350448564041SPaolo Bonzini  */
350538df19faSPhilippe Mathieu-Daudé MemTxResult
350648564041SPaolo Bonzini address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
35070c249ff7SLi Zhijian                                     const void *buf, hwaddr len)
350848564041SPaolo Bonzini {
35094c7c8563SJonathan Cameron     hwaddr mr_addr, l;
351048564041SPaolo Bonzini     MemoryRegion *mr;
351148564041SPaolo Bonzini 
351248564041SPaolo Bonzini     l = len;
35134c7c8563SJonathan Cameron     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true,
3514bc6b1cecSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
351547293c92SJonathan Cameron     return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED,
351647293c92SJonathan Cameron                                                buf, len, mr_addr, l, mr);
35171f4e496eSPaolo Bonzini }
35181f4e496eSPaolo Bonzini 
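/*
 * The same generator, instantiated with the _cached_slow suffix: these are
 * the out-of-line ld/st helpers that the inline *_cached accessors fall
 * back to when the cached region is not directly accessible RAM.
 */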
35191f4e496eSPaolo Bonzini #define ARG1_DECL                MemoryRegionCache *cache
35201f4e496eSPaolo Bonzini #define ARG1                     cache
352148564041SPaolo Bonzini #define SUFFIX                   _cached_slow
352248564041SPaolo Bonzini #define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
352348564041SPaolo Bonzini #define RCU_READ_LOCK()          ((void)0)
352448564041SPaolo Bonzini #define RCU_READ_UNLOCK()        ((void)0)
3525139c1837SPaolo Bonzini #include "memory_ldst.c.inc"
35261f4e496eSPaolo Bonzini 
35275e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
352873842ef0SPhilippe Mathieu-Daudé int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
352973842ef0SPhilippe Mathieu-Daudé                         void *ptr, size_t len, bool is_write)
353013eb76e0Sbellard {
3531a8170e5eSAvi Kivity     hwaddr phys_addr;
353273842ef0SPhilippe Mathieu-Daudé     vaddr l, page;
3533d7ef71efSPhilippe Mathieu-Daudé     uint8_t *buf = ptr;
353413eb76e0Sbellard 
353579ca7a1bSChristian Borntraeger     cpu_synchronize_state(cpu);
353613eb76e0Sbellard     while (len > 0) {
35375232e4c7SPeter Maydell         int asidx;
35385232e4c7SPeter Maydell         MemTxAttrs attrs;
3539ddfc8b96SPhilippe Mathieu-Daudé         MemTxResult res;
35405232e4c7SPeter Maydell 
354113eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
35425232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
35435232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
354413eb76e0Sbellard         /* if no physical page mapped, return an error */
354513eb76e0Sbellard         if (phys_addr == -1)
354613eb76e0Sbellard             return -1;
354713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
354813eb76e0Sbellard         if (l > len)
354913eb76e0Sbellard             l = len;
35505e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
35512e38847bSEdgar E. Iglesias         if (is_write) {
3552ddfc8b96SPhilippe Mathieu-Daudé             res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
3553ea7a5330SPeter Maydell                                           attrs, buf, l);
35542e38847bSEdgar E. Iglesias         } else {
3555ddfc8b96SPhilippe Mathieu-Daudé             res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
3556ddfc8b96SPhilippe Mathieu-Daudé                                      attrs, buf, l);
3557ddfc8b96SPhilippe Mathieu-Daudé         }
3558ddfc8b96SPhilippe Mathieu-Daudé         if (res != MEMTX_OK) {
3559ddfc8b96SPhilippe Mathieu-Daudé             return -1;
35602e38847bSEdgar E. Iglesias         }
356113eb76e0Sbellard         len -= l;
356213eb76e0Sbellard         buf += l;
356313eb76e0Sbellard         addr += l;
356413eb76e0Sbellard     }
356513eb76e0Sbellard     return 0;
356613eb76e0Sbellard }
3567038629a6SDr. David Alan Gilbert 
3568a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
356976f35538SWen Congyang {
35705c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3571149f54b5SPaolo Bonzini     hwaddr l = 1;
357276f35538SWen Congyang 
3573694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
35745c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3575bc6b1cecSPeter Maydell                                  phys_addr, &phys_addr, &l, false,
3576bc6b1cecSPeter Maydell                                  MEMTXATTRS_UNSPECIFIED);
357776f35538SWen Congyang 
357866997c42SMarkus Armbruster     return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
357976f35538SWen Congyang }
3580bd2fa51fSMichael R. Hines 
3581e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3582bd2fa51fSMichael R. Hines {
3583bd2fa51fSMichael R. Hines     RAMBlock *block;
3584e3807054SDr. David Alan Gilbert     int ret = 0;
3585bd2fa51fSMichael R. Hines 
3586694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
358799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
3588754cb9c0SYury Kotov         ret = func(block, opaque);
3589e3807054SDr. David Alan Gilbert         if (ret) {
3590e3807054SDr. David Alan Gilbert             break;
3591e3807054SDr. David Alan Gilbert         }
3592bd2fa51fSMichael R. Hines     }
3593e3807054SDr. David Alan Gilbert     return ret;
3594bd2fa51fSMichael R. Hines }
3595d3a5038cSDr. David Alan Gilbert 
3596d3a5038cSDr. David Alan Gilbert /*
3597d3a5038cSDr. David Alan Gilbert  * Unmap pages of memory from start to start+length such that
3598d3a5038cSDr. David Alan Gilbert  * they a) read as 0, b) trigger whatever fault mechanism
3599d3a5038cSDr. David Alan Gilbert  * the OS provides for postcopy.
3600d3a5038cSDr. David Alan Gilbert  * The pages must be unmapped by the end of the function.
3601d3a5038cSDr. David Alan Gilbert  * Returns: 0 on success, non-zero on failure
3602d3a5038cSDr. David Alan Gilbert  *
3603d3a5038cSDr. David Alan Gilbert  */
3604d3a5038cSDr. David Alan Gilbert int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3605d3a5038cSDr. David Alan Gilbert {
3606d3a5038cSDr. David Alan Gilbert     int ret = -1;
3607d3a5038cSDr. David Alan Gilbert 
3608d3a5038cSDr. David Alan Gilbert     uint8_t *host_startaddr = rb->host + start;
3609d3a5038cSDr. David Alan Gilbert 
3610619bd31dSMarc-André Lureau     if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3611ea18be78SXiaoyao Li         error_report("%s: Unaligned start address: %p",
3612ea18be78SXiaoyao Li                      __func__, host_startaddr);
3613d3a5038cSDr. David Alan Gilbert         goto err;
3614d3a5038cSDr. David Alan Gilbert     }
3615d3a5038cSDr. David Alan Gilbert 
3616dcdc4607SDavid Hildenbrand     if ((start + length) <= rb->max_length) {
3617db144f70SDr. David Alan Gilbert         bool need_madvise, need_fallocate;
3618619bd31dSMarc-André Lureau         if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3619ea18be78SXiaoyao Li             error_report("%s: Unaligned length: %zx", __func__, length);
3620d3a5038cSDr. David Alan Gilbert             goto err;
3621d3a5038cSDr. David Alan Gilbert         }
3622d3a5038cSDr. David Alan Gilbert 
3623d3a5038cSDr. David Alan Gilbert         errno = ENOTSUP; /* If we are missing MADVISE etc */
3624d3a5038cSDr. David Alan Gilbert 
3625db144f70SDr. David Alan Gilbert         /* The logic here is messy;
3626db144f70SDr. David Alan Gilbert          *    madvise DONTNEED fails for hugepages
3627db144f70SDr. David Alan Gilbert          *    fallocate works on hugepages and shmem
3628cdfa56c5SDavid Hildenbrand          *    shared anonymous memory requires madvise REMOVE
3629d3a5038cSDr. David Alan Gilbert          */
363080c3aeefSRichard Henderson         need_madvise = (rb->page_size == qemu_real_host_page_size());
3631db144f70SDr. David Alan Gilbert         need_fallocate = rb->fd != -1;
3632db144f70SDr. David Alan Gilbert         if (need_fallocate) {
3633db144f70SDr. David Alan Gilbert             /* For a file, this causes the area of the file to be zero'd
3634db144f70SDr. David Alan Gilbert              * if read, and for hugetlbfs also causes it to be unmapped
3635db144f70SDr. David Alan Gilbert              * so a userfault will trigger.
3636e2fa71f5SDr. David Alan Gilbert              */
3637e2fa71f5SDr. David Alan Gilbert #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
36381d44ff58SDavid Hildenbrand             /*
3639b2cccb52SDavid Hildenbrand              * fallocate() will fail with readonly files. Let's print a
3640b2cccb52SDavid Hildenbrand              * proper error message.
3641b2cccb52SDavid Hildenbrand              */
3642b2cccb52SDavid Hildenbrand             if (rb->flags & RAM_READONLY_FD) {
3643ea18be78SXiaoyao Li                 error_report("%s: Discarding RAM with readonly files is not"
3644ea18be78SXiaoyao Li                              " supported", __func__);
3645b2cccb52SDavid Hildenbrand                 goto err;
3646b2cccb52SDavid Hildenbrand 
3647b2cccb52SDavid Hildenbrand             }
3648b2cccb52SDavid Hildenbrand             /*
36491d44ff58SDavid Hildenbrand              * We'll discard data from the actual file, even though we only
36501d44ff58SDavid Hildenbrand              * have a MAP_PRIVATE mapping, possibly messing with other
36511d44ff58SDavid Hildenbrand              * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
36521d44ff58SDavid Hildenbrand              * change that behavior without violating the promised
36531d44ff58SDavid Hildenbrand              * semantics of ram_block_discard_range().
36541d44ff58SDavid Hildenbrand              *
36551d44ff58SDavid Hildenbrand              * Only warn, because it works as long as nobody else uses that
36561d44ff58SDavid Hildenbrand              * file.
36571d44ff58SDavid Hildenbrand              */
36581d44ff58SDavid Hildenbrand             if (!qemu_ram_is_shared(rb)) {
3659ea18be78SXiaoyao Li                 warn_report_once("%s: Discarding RAM"
36601d44ff58SDavid Hildenbrand                                  " in private file mappings is possibly"
36611d44ff58SDavid Hildenbrand                                  " dangerous, because it will modify the"
36621d44ff58SDavid Hildenbrand                                  " underlying file and will affect other"
3663ea18be78SXiaoyao Li                                  " users of the file", __func__);
36641d44ff58SDavid Hildenbrand             }
36651d44ff58SDavid Hildenbrand 
3666e2fa71f5SDr. David Alan Gilbert             ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3667e2fa71f5SDr. David Alan Gilbert                             start, length);
3668db144f70SDr. David Alan Gilbert             if (ret) {
3669db144f70SDr. David Alan Gilbert                 ret = -errno;
3670ea18be78SXiaoyao Li                 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3671ea18be78SXiaoyao Li                              __func__, rb->idstr, start, length, ret);
3672db144f70SDr. David Alan Gilbert                 goto err;
3673db144f70SDr. David Alan Gilbert             }
3674db144f70SDr. David Alan Gilbert #else
3675db144f70SDr. David Alan Gilbert             ret = -ENOSYS;
3676ea18be78SXiaoyao Li             error_report("%s: fallocate not available/file "
3677db144f70SDr. David Alan Gilbert                          "%s:%" PRIx64 " +%zx (%d)",
3678ea18be78SXiaoyao Li                          __func__, rb->idstr, start, length, ret);
3679db144f70SDr. David Alan Gilbert             goto err;
3680e2fa71f5SDr. David Alan Gilbert #endif
3681e2fa71f5SDr. David Alan Gilbert         }
3682db144f70SDr. David Alan Gilbert         if (need_madvise) {
3683db144f70SDr. David Alan Gilbert             /* For normal RAM this causes it to be unmapped,
3684db144f70SDr. David Alan Gilbert              * for shared memory it causes the local mapping to disappear
3685db144f70SDr. David Alan Gilbert              * and to fall back on the file contents (which we just
3686db144f70SDr. David Alan Gilbert              * fallocate'd away).
3687db144f70SDr. David Alan Gilbert              */
3688db144f70SDr. David Alan Gilbert #if defined(CONFIG_MADVISE)
3689cdfa56c5SDavid Hildenbrand             if (qemu_ram_is_shared(rb) && rb->fd < 0) {
3690cdfa56c5SDavid Hildenbrand                 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE);
3691cdfa56c5SDavid Hildenbrand             } else {
3692cdfa56c5SDavid Hildenbrand                 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED);
3693cdfa56c5SDavid Hildenbrand             }
3694d3a5038cSDr. David Alan Gilbert             if (ret) {
3695d3a5038cSDr. David Alan Gilbert                 ret = -errno;
3696ea18be78SXiaoyao Li                 error_report("%s: Failed to discard range "
3697d3a5038cSDr. David Alan Gilbert                              "%s:%" PRIx64 " +%zx (%d)",
3698ea18be78SXiaoyao Li                              __func__, rb->idstr, start, length, ret);
3699db144f70SDr. David Alan Gilbert                 goto err;
3700d3a5038cSDr. David Alan Gilbert             }
3701db144f70SDr. David Alan Gilbert #else
3702db144f70SDr. David Alan Gilbert             ret = -ENOSYS;
3703ea18be78SXiaoyao Li             error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
3704ea18be78SXiaoyao Li                          __func__, rb->idstr, start, length, ret);
3705db144f70SDr. David Alan Gilbert             goto err;
3706db144f70SDr. David Alan Gilbert #endif
3707db144f70SDr. David Alan Gilbert         }
3708db144f70SDr. David Alan Gilbert         trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3709db144f70SDr. David Alan Gilbert                                       need_madvise, need_fallocate, ret);
3710d3a5038cSDr. David Alan Gilbert     } else {
3711ea18be78SXiaoyao Li         error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
3712ea18be78SXiaoyao Li                      __func__, rb->idstr, start, length, rb->max_length);
3713d3a5038cSDr. David Alan Gilbert     }
3714d3a5038cSDr. David Alan Gilbert 
3715d3a5038cSDr. David Alan Gilbert err:
3716d3a5038cSDr. David Alan Gilbert     return ret;
3717d3a5038cSDr. David Alan Gilbert }
3718d3a5038cSDr. David Alan Gilbert 
3719b2e9426cSXiaoyao Li int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
3720b2e9426cSXiaoyao Li                                         size_t length)
3721b2e9426cSXiaoyao Li {
3722b2e9426cSXiaoyao Li     int ret = -1;
3723b2e9426cSXiaoyao Li 
3724b2e9426cSXiaoyao Li #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3725b2e9426cSXiaoyao Li     ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3726b2e9426cSXiaoyao Li                     start, length);
3727b2e9426cSXiaoyao Li 
3728b2e9426cSXiaoyao Li     if (ret) {
3729b2e9426cSXiaoyao Li         ret = -errno;
3730b2e9426cSXiaoyao Li         error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3731b2e9426cSXiaoyao Li                      __func__, rb->idstr, start, length, ret);
3732b2e9426cSXiaoyao Li     }
3733b2e9426cSXiaoyao Li #else
3734b2e9426cSXiaoyao Li     ret = -ENOSYS;
3735b2e9426cSXiaoyao Li     error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
3736b2e9426cSXiaoyao Li                  __func__, rb->idstr, start, length, ret);
3737b2e9426cSXiaoyao Li #endif
3738b2e9426cSXiaoyao Li 
3739b2e9426cSXiaoyao Li     return ret;
3740b2e9426cSXiaoyao Li }
3741b2e9426cSXiaoyao Li 
3742a4de8552SJunyan He bool ramblock_is_pmem(RAMBlock *rb)
3743a4de8552SJunyan He {
3744a4de8552SJunyan He     return rb->flags & RAM_PMEM;
3745a4de8552SJunyan He }
3746a4de8552SJunyan He 
3747b6b71cb5SMarkus Armbruster static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
37485e8fd947SAlexey Kardashevskiy {
37495e8fd947SAlexey Kardashevskiy     if (start == end - 1) {
3750b6b71cb5SMarkus Armbruster         qemu_printf("\t%3d      ", start);
37515e8fd947SAlexey Kardashevskiy     } else {
3752b6b71cb5SMarkus Armbruster         qemu_printf("\t%3d..%-3d ", start, end - 1);
37535e8fd947SAlexey Kardashevskiy     }
3754b6b71cb5SMarkus Armbruster     qemu_printf(" skip=%d ", skip);
37555e8fd947SAlexey Kardashevskiy     if (ptr == PHYS_MAP_NODE_NIL) {
3756b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=NIL");
37575e8fd947SAlexey Kardashevskiy     } else if (!skip) {
3758b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=#%d", ptr);
37595e8fd947SAlexey Kardashevskiy     } else {
3760b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=[%d]", ptr);
37615e8fd947SAlexey Kardashevskiy     }
3762b6b71cb5SMarkus Armbruster     qemu_printf("\n");
37635e8fd947SAlexey Kardashevskiy }
37645e8fd947SAlexey Kardashevskiy 
37655e8fd947SAlexey Kardashevskiy #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
37665e8fd947SAlexey Kardashevskiy                            int128_sub((size), int128_one())) : 0)
37675e8fd947SAlexey Kardashevskiy 
3768b6b71cb5SMarkus Armbruster void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
37695e8fd947SAlexey Kardashevskiy {
37705e8fd947SAlexey Kardashevskiy     int i;
37715e8fd947SAlexey Kardashevskiy 
3772b6b71cb5SMarkus Armbruster     qemu_printf("  Dispatch\n");
3773b6b71cb5SMarkus Armbruster     qemu_printf("    Physical sections\n");
37745e8fd947SAlexey Kardashevskiy 
37755e8fd947SAlexey Kardashevskiy     for (i = 0; i < d->map.sections_nb; ++i) {
37765e8fd947SAlexey Kardashevskiy         MemoryRegionSection *s = d->map.sections + i;
37775e8fd947SAlexey Kardashevskiy         const char *names[] = { " [unassigned]", " [not dirty]",
37785e8fd947SAlexey Kardashevskiy                                 " [ROM]", " [watch]" };
37795e8fd947SAlexey Kardashevskiy 
3780883f2c59SPhilippe Mathieu-Daudé         qemu_printf("      #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx
3781b6b71cb5SMarkus Armbruster                     " %s%s%s%s%s",
37825e8fd947SAlexey Kardashevskiy             i,
37835e8fd947SAlexey Kardashevskiy             s->offset_within_address_space,
3784f9c307c3SZhenzhong Duan             s->offset_within_address_space + MR_SIZE(s->size),
37855e8fd947SAlexey Kardashevskiy             s->mr->name ? s->mr->name : "(noname)",
37865e8fd947SAlexey Kardashevskiy             i < ARRAY_SIZE(names) ? names[i] : "",
37875e8fd947SAlexey Kardashevskiy             s->mr == root ? " [ROOT]" : "",
37885e8fd947SAlexey Kardashevskiy             s == d->mru_section ? " [MRU]" : "",
37895e8fd947SAlexey Kardashevskiy             s->mr->is_iommu ? " [iommu]" : "");
37905e8fd947SAlexey Kardashevskiy 
37915e8fd947SAlexey Kardashevskiy         if (s->mr->alias) {
3792b6b71cb5SMarkus Armbruster             qemu_printf(" alias=%s", s->mr->alias->name ?
37935e8fd947SAlexey Kardashevskiy                     s->mr->alias->name : "noname");
37945e8fd947SAlexey Kardashevskiy         }
3795b6b71cb5SMarkus Armbruster         qemu_printf("\n");
37965e8fd947SAlexey Kardashevskiy     }
37975e8fd947SAlexey Kardashevskiy 
3798b6b71cb5SMarkus Armbruster     qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
37995e8fd947SAlexey Kardashevskiy                P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
38005e8fd947SAlexey Kardashevskiy     for (i = 0; i < d->map.nodes_nb; ++i) {
38015e8fd947SAlexey Kardashevskiy         int j, jprev;
38025e8fd947SAlexey Kardashevskiy         PhysPageEntry prev;
38035e8fd947SAlexey Kardashevskiy         Node *n = d->map.nodes + i;
38045e8fd947SAlexey Kardashevskiy 
3805b6b71cb5SMarkus Armbruster         qemu_printf("      [%d]\n", i);
38065e8fd947SAlexey Kardashevskiy 
38075e8fd947SAlexey Kardashevskiy         for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
38085e8fd947SAlexey Kardashevskiy             PhysPageEntry *pe = *n + j;
38095e8fd947SAlexey Kardashevskiy 
38105e8fd947SAlexey Kardashevskiy             if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
38115e8fd947SAlexey Kardashevskiy                 continue;
38125e8fd947SAlexey Kardashevskiy             }
38135e8fd947SAlexey Kardashevskiy 
3814b6b71cb5SMarkus Armbruster             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
38155e8fd947SAlexey Kardashevskiy 
38165e8fd947SAlexey Kardashevskiy             jprev = j;
38175e8fd947SAlexey Kardashevskiy             prev = *pe;
38185e8fd947SAlexey Kardashevskiy         }
38195e8fd947SAlexey Kardashevskiy 
38205e8fd947SAlexey Kardashevskiy         if (jprev != ARRAY_SIZE(*n)) {
3821b6b71cb5SMarkus Armbruster             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
38225e8fd947SAlexey Kardashevskiy         }
38235e8fd947SAlexey Kardashevskiy     }
38245e8fd947SAlexey Kardashevskiy }
38255e8fd947SAlexey Kardashevskiy 
38267e6d32ebSDavid Hildenbrand /* Require any discards to work. */
382798da491dSDavid Hildenbrand static unsigned int ram_block_discard_required_cnt;
38287e6d32ebSDavid Hildenbrand /* Require only coordinated discards to work. */
38297e6d32ebSDavid Hildenbrand static unsigned int ram_block_coordinated_discard_required_cnt;
38307e6d32ebSDavid Hildenbrand /* Disable any discards. */
383198da491dSDavid Hildenbrand static unsigned int ram_block_discard_disabled_cnt;
38327e6d32ebSDavid Hildenbrand /* Disable only uncoordinated discards. */
38337e6d32ebSDavid Hildenbrand static unsigned int ram_block_uncoordinated_discard_disabled_cnt;
383498da491dSDavid Hildenbrand static QemuMutex ram_block_discard_disable_mutex;
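
/*
 * The four counters pair up: a "require" succeeds only while no conflicting
 * "disable" is active, and vice versa.  For example, disabling all discards
 * fails with -EBUSY while either kind of requirement is registered, whereas
 * disabling only uncoordinated discards conflicts just with the
 * unrestricted requirement.
 */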
383598da491dSDavid Hildenbrand 
383698da491dSDavid Hildenbrand static void ram_block_discard_disable_mutex_lock(void)
383798da491dSDavid Hildenbrand {
383898da491dSDavid Hildenbrand     static gsize initialized;
383998da491dSDavid Hildenbrand 
384098da491dSDavid Hildenbrand     if (g_once_init_enter(&initialized)) {
384198da491dSDavid Hildenbrand         qemu_mutex_init(&ram_block_discard_disable_mutex);
384298da491dSDavid Hildenbrand         g_once_init_leave(&initialized, 1);
384398da491dSDavid Hildenbrand     }
384498da491dSDavid Hildenbrand     qemu_mutex_lock(&ram_block_discard_disable_mutex);
384598da491dSDavid Hildenbrand }
384698da491dSDavid Hildenbrand 
384798da491dSDavid Hildenbrand static void ram_block_discard_disable_mutex_unlock(void)
384898da491dSDavid Hildenbrand {
384998da491dSDavid Hildenbrand     qemu_mutex_unlock(&ram_block_discard_disable_mutex);
385098da491dSDavid Hildenbrand }
3851d24f31dbSDavid Hildenbrand 
3852d24f31dbSDavid Hildenbrand int ram_block_discard_disable(bool state)
3853d24f31dbSDavid Hildenbrand {
385498da491dSDavid Hildenbrand     int ret = 0;
3855d24f31dbSDavid Hildenbrand 
385698da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
3857d24f31dbSDavid Hildenbrand     if (!state) {
385898da491dSDavid Hildenbrand         ram_block_discard_disabled_cnt--;
38597e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_required_cnt ||
38607e6d32ebSDavid Hildenbrand                ram_block_coordinated_discard_required_cnt) {
386198da491dSDavid Hildenbrand         ret = -EBUSY;
38627e6d32ebSDavid Hildenbrand     } else {
38637e6d32ebSDavid Hildenbrand         ram_block_discard_disabled_cnt++;
38647e6d32ebSDavid Hildenbrand     }
38657e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
38667e6d32ebSDavid Hildenbrand     return ret;
38677e6d32ebSDavid Hildenbrand }
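/*
 * Illustrative sketch (not part of this file): a device that pins guest RAM
 * for DMA, and therefore cannot cope with pages being discarded underneath
 * it, would typically bracket its lifetime as follows (errp is assumed to be
 * the usual Error ** out-parameter of a realize path):
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_setg(errp, "cannot pin guest RAM: discarding of RAM is in use");
 *         return;
 *     }
 *     ... map and pin guest memory ...
 *     ram_block_discard_disable(false);
 */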
38687e6d32ebSDavid Hildenbrand 
38697e6d32ebSDavid Hildenbrand int ram_block_uncoordinated_discard_disable(bool state)
38707e6d32ebSDavid Hildenbrand {
38717e6d32ebSDavid Hildenbrand     int ret = 0;
38727e6d32ebSDavid Hildenbrand 
38737e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
38747e6d32ebSDavid Hildenbrand     if (!state) {
38757e6d32ebSDavid Hildenbrand         ram_block_uncoordinated_discard_disabled_cnt--;
38767e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_required_cnt) {
38777e6d32ebSDavid Hildenbrand         ret = -EBUSY;
38787e6d32ebSDavid Hildenbrand     } else {
38797e6d32ebSDavid Hildenbrand         ram_block_uncoordinated_discard_disabled_cnt++;
3880d24f31dbSDavid Hildenbrand     }
388198da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
388298da491dSDavid Hildenbrand     return ret;
3883d24f31dbSDavid Hildenbrand }
3884d24f31dbSDavid Hildenbrand 
3885d24f31dbSDavid Hildenbrand int ram_block_discard_require(bool state)
3886d24f31dbSDavid Hildenbrand {
388798da491dSDavid Hildenbrand     int ret = 0;
3888d24f31dbSDavid Hildenbrand 
388998da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
3890d24f31dbSDavid Hildenbrand     if (!state) {
389198da491dSDavid Hildenbrand         ram_block_discard_required_cnt--;
38927e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_disabled_cnt ||
38937e6d32ebSDavid Hildenbrand                ram_block_uncoordinated_discard_disabled_cnt) {
389498da491dSDavid Hildenbrand         ret = -EBUSY;
38957e6d32ebSDavid Hildenbrand     } else {
38967e6d32ebSDavid Hildenbrand         ram_block_discard_required_cnt++;
38977e6d32ebSDavid Hildenbrand     }
38987e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
38997e6d32ebSDavid Hildenbrand     return ret;
39007e6d32ebSDavid Hildenbrand }
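/*
 * The mirror image of ram_block_discard_disable(): a feature that discards
 * RAM behind the guest's back (and would therefore break a device holding
 * pinned pages) takes a reference with ram_block_discard_require(true) while
 * active and drops it with ram_block_discard_require(false) when done.
 */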
39017e6d32ebSDavid Hildenbrand 
39027e6d32ebSDavid Hildenbrand int ram_block_coordinated_discard_require(bool state)
39037e6d32ebSDavid Hildenbrand {
39047e6d32ebSDavid Hildenbrand     int ret = 0;
39057e6d32ebSDavid Hildenbrand 
39067e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
39077e6d32ebSDavid Hildenbrand     if (!state) {
39087e6d32ebSDavid Hildenbrand         ram_block_coordinated_discard_required_cnt--;
39097e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_disabled_cnt) {
39107e6d32ebSDavid Hildenbrand         ret = -EBUSY;
39117e6d32ebSDavid Hildenbrand     } else {
39127e6d32ebSDavid Hildenbrand         ram_block_coordinated_discard_required_cnt++;
3913d24f31dbSDavid Hildenbrand     }
391498da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
391598da491dSDavid Hildenbrand     return ret;
3916d24f31dbSDavid Hildenbrand }
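/*
 * "Coordinated" discards are those negotiated with listeners through the
 * RamDiscardManager interface (e.g. virtio-mem), as opposed to uncoordinated
 * discards performed unilaterally.  Note that this variant only conflicts
 * with ram_block_discard_disable(), not with
 * ram_block_uncoordinated_discard_disable().
 */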
3917d24f31dbSDavid Hildenbrand 
3918d24f31dbSDavid Hildenbrand bool ram_block_discard_is_disabled(void)
3919d24f31dbSDavid Hildenbrand {
39207e6d32ebSDavid Hildenbrand     return qatomic_read(&ram_block_discard_disabled_cnt) ||
39217e6d32ebSDavid Hildenbrand            qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt);
3922d24f31dbSDavid Hildenbrand }
3923d24f31dbSDavid Hildenbrand 
3924d24f31dbSDavid Hildenbrand bool ram_block_discard_is_required(void)
3925d24f31dbSDavid Hildenbrand {
39267e6d32ebSDavid Hildenbrand     return qatomic_read(&ram_block_discard_required_cnt) ||
39277e6d32ebSDavid Hildenbrand            qatomic_read(&ram_block_coordinated_discard_required_cnt);
3928d24f31dbSDavid Hildenbrand }
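/*
 * These two predicates read the counters with qatomic_read() and without
 * taking ram_block_discard_disable_mutex, so they only report a snapshot of
 * the current state rather than a guarantee that it cannot change
 * immediately afterwards.
 */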