/*
 * RAM allocation and memory access
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/page-vary.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "qemu/hbitmap.h"
#include "qemu/madvise.h"
#include "qemu/lockable.h"

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */

#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "system/xen.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "system/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/memfd.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "system/dma.h"
#include "system/hostmem.h"
#include "system/hw_accel.h"
#include "system/xen-mapcache.h"
#include "trace.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "system/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/pmem.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
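
/*
 * Worked example, assuming the common TARGET_PAGE_BITS of 12: the page
 * number then occupies 64 - 12 = 52 bits, so P_L2_LEVELS is
 * ((64 - 12 - 1) / 9) + 1 = 6 levels, each indexed by 9 bits of the page
 * number.  PHYS_MAP_NODE_NIL is the all-ones 26-bit ptr value (0x3ffffff),
 * reserved as a sentinel; phys_map_node_alloc() asserts it is never handed
 * out as a real node index.
 */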

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
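
/*
 * Section index 0 is reserved for this "unassigned" sentinel: freshly
 * allocated leaf entries point at it (see phys_map_node_alloc() below), so
 * any lookup that never hits a registered section resolves to the
 * unassigned section (backed elsewhere in this file by io_mem_unassigned).
 */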

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
typedef struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
} CPUAddressSpace;

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
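
/*
 * Note the fast path in the loop above: when the remaining range is aligned
 * to this level's step and at least one step wide, the leaf pointer is
 * installed directly at this level (skip = 0) instead of descending further,
 * so one higher-level entry covers the whole step, much like a huge page.
 */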

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
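
/*
 * Effect of compaction: when an interior node has exactly one valid child,
 * the parent entry absorbs the child's skip count and points straight at
 * the grandchild, so chains of single-child nodes collapse into one entry
 * whose skip field tells the lookup how many levels it may jump over.
 */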

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
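
/*
 * The walk above starts at i = P_L2_LEVELS and subtracts each entry's skip
 * count, so chains collapsed by phys_page_compact() are crossed in a single
 * step; it terminates on a leaf (skip == 0) or when the levels run out.
 */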

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
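
/*
 * mru_section is a one-entry cache of the most recently used section.  It
 * is accessed with qatomic_read()/qatomic_set() so that concurrent RCU
 * readers can race on it harmlessly; the worst case is only a redundant
 * phys_page_find().
 */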

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @xlat: on input, the address to be translated through the IOMMU; on
 *        output, the translated address offset within the destination
 *        memory region. It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address. This is only
 *            meaningful for IOMMU translated addresses, since the IOMMU
 *            may map huge pages that a plain target page mask cannot
 *            describe. It can be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from an RCU critical section.  It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                         hwaddr *xlat,
                                                         hwaddr *plen_out,
                                                         hwaddr *page_mask_out,
                                                         bool is_write,
                                                         bool is_mmio,
                                                         AddressSpace **target_as,
                                                         MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        hwaddr addr = *xlat;
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
        int iommu_idx = 0;
        IOMMUTLBEntry iotlb;

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
            goto unassigned;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }
    return *section;

unassigned:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
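
/*
 * The do/while loop above handles stacked IOMMUs, where the target address
 * space of one IOMMU is itself behind another: translation repeats until
 * the resulting section is not an IOMMU region, and the page mask is
 * intersected at every stage so the final mask is valid across the chain.
 */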

/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in the above address space
 * @xlat: the translated address offset within the memory region. It
 *        cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            can be %NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This is only
 *            meaningful for IOMMU translated addresses, since the IOMMU
 *            may map huge pages that a plain target page mask cannot
 *            describe. It can be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from an RCU critical section.
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as,
                                                 MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    if (!plen_out) {
        plen_out = &plen;
    }

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,
            plen_out, is_mmio);

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
                                             is_write, is_mmio,
                                             target_as, attrs);
    }
    if (page_mask_out) {
        /* Not behind an IOMMU, use default page size. */
        *page_mask_out = ~TARGET_PAGE_MASK;
    }

    return *section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * only about the page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,
                                    attrs);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
        section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMA, and DMA is only allowed to RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}
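
/*
 * The returned entry describes the whole IOMMU page containing @addr
 * (iova and translated_addr are masked down to the page boundary), not
 * just the byte that was probed; callers such as the vhost device-IOTLB
 * code can therefore cache it for the full page.
 */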

/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
                                 MemTxAttrs attrs)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so set up the MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}

typedef struct TCGIOMMUNotifier {
    IOMMUNotifier n;
    MemoryRegion *mr;
    CPUState *cpu;
    int iommu_idx;
    bool active;
} TCGIOMMUNotifier;

static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);

    if (!notifier->active) {
        return;
    }
    tlb_flush(notifier->cpu);
    notifier->active = false;
    /* We leave the notifier struct on the list to avoid reallocating it later.
     * Generally the number of IOMMUs a CPU deals with will be small.
     * In any case we can't unregister the iommu notifier from a notify
     * callback.
     */
}

static void tcg_register_iommu_notifier(CPUState *cpu,
                                        IOMMUMemoryRegion *iommu_mr,
                                        int iommu_idx)
{
    /* Make sure this CPU has an IOMMU notifier registered for this
     * IOMMU/IOMMU index combination, so that we can flush its TLB
     * when the IOMMU tells us the mappings we've cached have changed.
     */
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier = NULL;
    int i;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
            break;
        }
    }
    if (i == cpu->iommu_notifiers->len) {
        /* Not found, add a new entry at the end of the array */
        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
        notifier = g_new0(TCGIOMMUNotifier, 1);
        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;

        notifier->mr = mr;
        notifier->iommu_idx = iommu_idx;
        notifier->cpu = cpu;
        /* Rather than trying to register interest in the specific part
         * of the iommu's address space that we've accessed and then
         * expand it later as subsequent accesses touch more of it, we
         * just register interest in the whole thing, on the assumption
         * that iommu reconfiguration will be rare.
         */
        iommu_notifier_init(&notifier->n,
                            tcg_iommu_unmap_notify,
                            IOMMU_NOTIFIER_UNMAP,
                            0,
                            HWADDR_MAX,
                            iommu_idx);
        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
                                              &error_fatal);
    }

    if (!notifier->active) {
        notifier->active = true;
    }
}

void tcg_iommu_free_notifier_list(CPUState *cpu)
{
    /* Destroy the CPU's notifier list */
    int i;
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
        g_free(notifier);
    }
    g_array_free(cpu->iommu_notifiers, true);
}

void tcg_iommu_init_notifier_list(CPUState *cpu)
{
    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    IOMMUTLBEntry iotlb;
    int iommu_idx;
    hwaddr addr = orig_addr;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    for (;;) {
        section = address_space_translate_internal(d, addr, &addr, plen, false);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }

        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
         * doesn't short-cut its translation table walk.
         */
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Update the caller's prot bits to remove permissions the IOMMU
         * is giving us a failure response for. If we get down to no
         * permissions left at all we can give up now.
         */
        if (!(iotlb.perm & IOMMU_RO)) {
            *prot &= ~(PAGE_READ | PAGE_EXEC);
        }
        if (!(iotlb.perm & IOMMU_WO)) {
            *prot &= ~PAGE_WRITE;
        }

        if (!*prot) {
            goto translate_fail;
        }

        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
    }

    assert(!memory_region_is_iommu(section->mr));
    *xlat = addr;
    return section;

translate_fail:
    /*
     * We should be given a page-aligned address -- certainly
     * tlb_set_page_with_attrs() does so.  The page offset of xlat
     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
     * The page portion of xlat will be logged by memory_region_access_valid()
     * when this memory access is rejected, so use the original untranslated
     * physical address.
     */
    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
    *xlat = orig_addr;
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
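
/*
 * Unlike flatview_do_translate(), the variant above feeds the TCG TLB: it
 * asks the IOMMU for IOMMU_NONE so it learns the full permission set,
 * narrows *prot to what the IOMMU actually grants, and registers an unmap
 * notifier so cached TLB entries are flushed if the IOMMU mappings change
 * later.
 */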

void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);
    char *as_name;

    assert(mr);
    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);
    g_free(as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
        cpu->cpu_ases_count = cpu->num_ases;
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        newas->tcg_as_listener.name = "tcg";
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

void cpu_address_space_destroy(CPUState *cpu, int asidx)
{
    CPUAddressSpace *cpuas;

    assert(cpu->cpu_ases);
    assert(asidx >= 0 && asidx < cpu->num_ases);
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    cpuas = &cpu->cpu_ases[asidx];
    if (tcg_enabled()) {
        memory_listener_unregister(&cpuas->tcg_as_listener);
    }

    address_space_destroy(cpuas->as);
    g_free_rcu(cpuas->as, rcu);

    if (asidx == 0) {
        /* reset the convenience alias for address space 0 */
        cpu->as = NULL;
    }

    if (--cpu->cpu_ases_count == 0) {
        g_free(cpu->cpu_ases);
        cpu->cpu_ases = NULL;
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}

/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the BQL.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * qatomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    RCU_READ_LOCK_GUARD();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page, start_page;
    bool dirty = false;
    RAMBlock *ramblock;
    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    start_page = start >> TARGET_PAGE_BITS;
    page = start_page;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
        ramblock = qemu_get_ram_block(start);
        /* Range sanity check on the ramblock */
        assert(start >= ramblock->offset &&
               start + length <= ramblock->offset + ramblock->used_length);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                                  offset, num);
            page += num;
        }

        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
        mr_size = (end - start_page) << TARGET_PAGE_BITS;
        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
    }

    if (dirty) {
        cpu_physical_memory_dirty_bits_cleared(start, length);
    }

    return dirty;
}
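
/*
 * The per-client dirty bitmap is not one flat bitmap: ram_list.dirty_memory[]
 * holds RCU-managed arrays of fixed-size chunks of DIRTY_MEMORY_BLOCK_SIZE
 * bits each, which is why the loop above splits the page range at chunk
 * boundaries before clearing bits atomically.
 */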
9221ccde1cbSbellard 
9238deaf12cSGerd Hoffmann DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
9245dea4079SPeter Xu     (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
9258deaf12cSGerd Hoffmann {
9268deaf12cSGerd Hoffmann     DirtyMemoryBlocks *blocks;
92773188068SPeter Maydell     ram_addr_t start, first, last;
9288deaf12cSGerd Hoffmann     unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
9298deaf12cSGerd Hoffmann     DirtyBitmapSnapshot *snap;
9308deaf12cSGerd Hoffmann     unsigned long page, end, dest;
9318deaf12cSGerd Hoffmann 
93273188068SPeter Maydell     start = memory_region_get_ram_addr(mr);
93373188068SPeter Maydell     /* We know we're only called for RAM MemoryRegions */
93473188068SPeter Maydell     assert(start != RAM_ADDR_INVALID);
93573188068SPeter Maydell     start += offset;
93673188068SPeter Maydell 
93773188068SPeter Maydell     first = QEMU_ALIGN_DOWN(start, align);
93873188068SPeter Maydell     last  = QEMU_ALIGN_UP(start + length, align);
93973188068SPeter Maydell 
9408deaf12cSGerd Hoffmann     snap = g_malloc0(sizeof(*snap) +
9418deaf12cSGerd Hoffmann                      ((last - first) >> (TARGET_PAGE_BITS + 3)));
9428deaf12cSGerd Hoffmann     snap->start = first;
9438deaf12cSGerd Hoffmann     snap->end   = last;
9448deaf12cSGerd Hoffmann 
9458deaf12cSGerd Hoffmann     page = first >> TARGET_PAGE_BITS;
9468deaf12cSGerd Hoffmann     end  = last  >> TARGET_PAGE_BITS;
9478deaf12cSGerd Hoffmann     dest = 0;
9488deaf12cSGerd Hoffmann 
949694ea274SDr. David Alan Gilbert     WITH_RCU_READ_LOCK_GUARD() {
950d73415a3SStefan Hajnoczi         blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
9518deaf12cSGerd Hoffmann 
9528deaf12cSGerd Hoffmann         while (page < end) {
9538deaf12cSGerd Hoffmann             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
9546ba9b60aSPhilippe Mathieu-Daudé             unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
955694ea274SDr. David Alan Gilbert             unsigned long num = MIN(end - page,
9566ba9b60aSPhilippe Mathieu-Daudé                                     DIRTY_MEMORY_BLOCK_SIZE - ofs);
9578deaf12cSGerd Hoffmann 
9586ba9b60aSPhilippe Mathieu-Daudé             assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
9598deaf12cSGerd Hoffmann             assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
9606ba9b60aSPhilippe Mathieu-Daudé             ofs >>= BITS_PER_LEVEL;
9618deaf12cSGerd Hoffmann 
9628deaf12cSGerd Hoffmann             bitmap_copy_and_clear_atomic(snap->dirty + dest,
9636ba9b60aSPhilippe Mathieu-Daudé                                          blocks->blocks[idx] + ofs,
9648deaf12cSGerd Hoffmann                                          num);
9658deaf12cSGerd Hoffmann             page += num;
9668deaf12cSGerd Hoffmann             dest += num >> BITS_PER_LEVEL;
9678deaf12cSGerd Hoffmann         }
968694ea274SDr. David Alan Gilbert     }
9698deaf12cSGerd Hoffmann 
97086a9ae80SNicholas Piggin     cpu_physical_memory_dirty_bits_cleared(start, length);
9718deaf12cSGerd Hoffmann 
972077874e0SPeter Xu     memory_region_clear_dirty_bitmap(mr, offset, length);
973077874e0SPeter Xu 
9748deaf12cSGerd Hoffmann     return snap;
9758deaf12cSGerd Hoffmann }
9768deaf12cSGerd Hoffmann 
9778deaf12cSGerd Hoffmann bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
9788deaf12cSGerd Hoffmann                                             ram_addr_t start,
9798deaf12cSGerd Hoffmann                                             ram_addr_t length)
9808deaf12cSGerd Hoffmann {
9818deaf12cSGerd Hoffmann     unsigned long page, end;
9828deaf12cSGerd Hoffmann 
9838deaf12cSGerd Hoffmann     assert(start >= snap->start);
9848deaf12cSGerd Hoffmann     assert(start + length <= snap->end);
9858deaf12cSGerd Hoffmann 
9868deaf12cSGerd Hoffmann     end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
9878deaf12cSGerd Hoffmann     page = (start - snap->start) >> TARGET_PAGE_BITS;
9888deaf12cSGerd Hoffmann 
9898deaf12cSGerd Hoffmann     while (page < end) {
9908deaf12cSGerd Hoffmann         if (test_bit(page, snap->dirty)) {
9918deaf12cSGerd Hoffmann             return true;
9928deaf12cSGerd Hoffmann         }
9938deaf12cSGerd Hoffmann         page++;
9948deaf12cSGerd Hoffmann     }
9958deaf12cSGerd Hoffmann     return false;
9968deaf12cSGerd Hoffmann }
9978deaf12cSGerd Hoffmann 
99879e2b9aeSPaolo Bonzini /* Called from RCU critical section */
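/* The IOTLB value is simply the section's index within the dispatch map. */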
999bb0e627aSAndreas Färber hwaddr memory_region_section_get_iotlb(CPUState *cpu,
10008f5db641SRichard Henderson                                        MemoryRegionSection *section)
1001e5548617SBlue Swirl {
10028f5db641SRichard Henderson     AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
10038f5db641SRichard Henderson     return section - d->map.sections;
1004e5548617SBlue Swirl }
10058da3ff18Spbrook 
1006c227f099SAnthony Liguori static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
10075312bd8bSAvi Kivity                             uint16_t section);
100816620684SAlexey Kardashevskiy static subpage_t *subpage_init(FlatView *fv, hwaddr base);
100954688b1eSAvi Kivity 
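/*
 * Append a section to the dispatch map, growing the array
 * geometrically.  The returned index is what gets encoded into IOTLB
 * entries, which is why it must stay below TARGET_PAGE_SIZE (see the
 * assertion below).
 */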
101053cb28cbSMarcel Apfelbaum static uint16_t phys_section_add(PhysPageMap *map,
101153cb28cbSMarcel Apfelbaum                                  MemoryRegionSection *section)
10125312bd8bSAvi Kivity {
101368f3f65bSPaolo Bonzini     /* The physical section number is ORed with a page-aligned
101468f3f65bSPaolo Bonzini      * pointer to produce the iotlb entries.  Thus it should
101568f3f65bSPaolo Bonzini      * never overflow into the page-aligned value.
101668f3f65bSPaolo Bonzini      */
101753cb28cbSMarcel Apfelbaum     assert(map->sections_nb < TARGET_PAGE_SIZE);
101868f3f65bSPaolo Bonzini 
101953cb28cbSMarcel Apfelbaum     if (map->sections_nb == map->sections_nb_alloc) {
102053cb28cbSMarcel Apfelbaum         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
102153cb28cbSMarcel Apfelbaum         map->sections = g_renew(MemoryRegionSection, map->sections,
102253cb28cbSMarcel Apfelbaum                                 map->sections_nb_alloc);
10235312bd8bSAvi Kivity     }
102453cb28cbSMarcel Apfelbaum     map->sections[map->sections_nb] = *section;
1025dfde4e6eSPaolo Bonzini     memory_region_ref(section->mr);
102653cb28cbSMarcel Apfelbaum     return map->sections_nb++;
10275312bd8bSAvi Kivity }
10285312bd8bSAvi Kivity 
1029058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
1030058bc4b5SPaolo Bonzini {
103155b4e80bSDon Slutz     bool have_sub_page = mr->subpage;
103255b4e80bSDon Slutz 
1033dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
1034dfde4e6eSPaolo Bonzini 
103555b4e80bSDon Slutz     if (have_sub_page) {
1036058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
1037b4fefef9SPeter Crosthwaite         object_unref(OBJECT(&subpage->iomem));
1038058bc4b5SPaolo Bonzini         g_free(subpage);
1039058bc4b5SPaolo Bonzini     }
1040058bc4b5SPaolo Bonzini }
1041058bc4b5SPaolo Bonzini 
10426092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
10435312bd8bSAvi Kivity {
10449affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
10459affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1046058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
1047058bc4b5SPaolo Bonzini     }
10489affd6fcSPaolo Bonzini     g_free(map->sections);
10499affd6fcSPaolo Bonzini     g_free(map->nodes);
10505312bd8bSAvi Kivity }
10515312bd8bSAvi Kivity 
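/*
 * Register a section that does not cover a whole target page.  The
 * page is represented by a subpage_t container: one is created on
 * first use, and later sub-page sections falling on the same page are
 * added to the existing container.
 */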
10529950322aSAlexey Kardashevskiy static void register_subpage(FlatView *fv, MemoryRegionSection *section)
10530f0cb164SAvi Kivity {
10549950322aSAlexey Kardashevskiy     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
10550f0cb164SAvi Kivity     subpage_t *subpage;
1056a8170e5eSAvi Kivity     hwaddr base = section->offset_within_address_space
10570f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
1058003a0cf2SPeter Xu     MemoryRegionSection *existing = phys_page_find(d, base);
10590f0cb164SAvi Kivity     MemoryRegionSection subsection = {
10600f0cb164SAvi Kivity         .offset_within_address_space = base,
1061052e87b0SPaolo Bonzini         .size = int128_make64(TARGET_PAGE_SIZE),
10620f0cb164SAvi Kivity     };
1063a8170e5eSAvi Kivity     hwaddr start, end;
10640f0cb164SAvi Kivity 
1065f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
10660f0cb164SAvi Kivity 
1067f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
106816620684SAlexey Kardashevskiy         subpage = subpage_init(fv, base);
106916620684SAlexey Kardashevskiy         subsection.fv = fv;
10700f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
1071ac1970fbSAvi Kivity         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
107253cb28cbSMarcel Apfelbaum                       phys_section_add(&d->map, &subsection));
10730f0cb164SAvi Kivity     } else {
1074f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
10750f0cb164SAvi Kivity     }
10760f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1077052e87b0SPaolo Bonzini     end = start + int128_get64(section->size) - 1;
107853cb28cbSMarcel Apfelbaum     subpage_register(subpage, start, end,
107953cb28cbSMarcel Apfelbaum                      phys_section_add(&d->map, section));
10800f0cb164SAvi Kivity }
10810f0cb164SAvi Kivity 
10820f0cb164SAvi Kivity 
10839950322aSAlexey Kardashevskiy static void register_multipage(FlatView *fv,
1084052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
108533417e70Sbellard {
10869950322aSAlexey Kardashevskiy     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1087a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
108853cb28cbSMarcel Apfelbaum     uint16_t section_index = phys_section_add(&d->map, section);
1089052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1090052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
1091dd81124bSAvi Kivity 
1092733d5ef5SPaolo Bonzini     assert(num_pages);
1093733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
109433417e70Sbellard }
109533417e70Sbellard 
1096494d1997SWei Yang /*
1097494d1997SWei Yang  * The range in *section* may look like this:
1098494d1997SWei Yang  *
1099494d1997SWei Yang  *      |s|PPPPPPP|s|
1100494d1997SWei Yang  *
1101494d1997SWei Yang  * where s stands for subpage and P for page.
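 *
 * For example, with 4 KiB pages, a section covering [0x1800, 0x4800)
 * is split into a head subpage [0x1800, 0x2000), two whole pages
 * [0x2000, 0x4000), and a tail subpage [0x4000, 0x4800).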
1102494d1997SWei Yang  */
11038629d3fcSAlexey Kardashevskiy void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
11040f0cb164SAvi Kivity {
1105494d1997SWei Yang     MemoryRegionSection remain = *section;
1106052e87b0SPaolo Bonzini     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
11070f0cb164SAvi Kivity 
1108494d1997SWei Yang     /* register first subpage */
1109494d1997SWei Yang     if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1110494d1997SWei Yang         uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
1111494d1997SWei Yang                         - remain.offset_within_address_space;
1112733d5ef5SPaolo Bonzini 
1113494d1997SWei Yang         MemoryRegionSection now = remain;
1114052e87b0SPaolo Bonzini         now.size = int128_min(int128_make64(left), now.size);
11159950322aSAlexey Kardashevskiy         register_subpage(fv, &now);
1116494d1997SWei Yang         if (int128_eq(remain.size, now.size)) {
1117494d1997SWei Yang             return;
1118733d5ef5SPaolo Bonzini         }
1119052e87b0SPaolo Bonzini         remain.size = int128_sub(remain.size, now.size);
1120052e87b0SPaolo Bonzini         remain.offset_within_address_space += int128_get64(now.size);
1121052e87b0SPaolo Bonzini         remain.offset_within_region += int128_get64(now.size);
1122494d1997SWei Yang     }
1123494d1997SWei Yang 
1124494d1997SWei Yang     /* register whole pages */
1125494d1997SWei Yang     if (int128_ge(remain.size, page_size)) {
1126494d1997SWei Yang         MemoryRegionSection now = remain;
1127052e87b0SPaolo Bonzini         now.size = int128_and(now.size, int128_neg(page_size));
11289950322aSAlexey Kardashevskiy         register_multipage(fv, &now);
1129494d1997SWei Yang         if (int128_eq(remain.size, now.size)) {
1130494d1997SWei Yang             return;
113169b67646STyler Hall         }
1132494d1997SWei Yang         remain.size = int128_sub(remain.size, now.size);
1133494d1997SWei Yang         remain.offset_within_address_space += int128_get64(now.size);
1134494d1997SWei Yang         remain.offset_within_region += int128_get64(now.size);
11350f0cb164SAvi Kivity     }
1136494d1997SWei Yang 
1137494d1997SWei Yang     /* register last subpage */
1138494d1997SWei Yang     register_subpage(fv, &remain);
11390f0cb164SAvi Kivity }
11400f0cb164SAvi Kivity 
114162a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
114262a2744cSSheng Yang {
114362a2744cSSheng Yang     if (kvm_enabled()) {
114462a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
    }
114562a2744cSSheng Yang }
114662a2744cSSheng Yang 
1147b2a8658eSUmesh Deshpande void qemu_mutex_lock_ramlist(void)
1148b2a8658eSUmesh Deshpande {
1149b2a8658eSUmesh Deshpande     qemu_mutex_lock(&ram_list.mutex);
1150b2a8658eSUmesh Deshpande }
1151b2a8658eSUmesh Deshpande 
1152b2a8658eSUmesh Deshpande void qemu_mutex_unlock_ramlist(void)
1153b2a8658eSUmesh Deshpande {
1154b2a8658eSUmesh Deshpande     qemu_mutex_unlock(&ram_list.mutex);
1155b2a8658eSUmesh Deshpande }
1156b2a8658eSUmesh Deshpande 
1157ca411b7cSDaniel P. Berrangé GString *ram_block_format(void)
1158be9b23c4SPeter Xu {
1159be9b23c4SPeter Xu     RAMBlock *block;
1160be9b23c4SPeter Xu     char *psize;
1161ca411b7cSDaniel P. Berrangé     GString *buf = g_string_new("");
1162be9b23c4SPeter Xu 
1163694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
1164dbc6ae9cSTed Chen     g_string_append_printf(buf, "%24s %8s  %18s %18s %18s %18s %3s\n",
1165dbc6ae9cSTed Chen                            "Block Name", "PSize", "Offset", "Used", "Total",
1166dbc6ae9cSTed Chen                            "HVA", "RO");
1167dbc6ae9cSTed Chen 
1168be9b23c4SPeter Xu     RAMBLOCK_FOREACH(block) {
1169be9b23c4SPeter Xu         psize = size_to_str(block->page_size);
1170ca411b7cSDaniel P. Berrangé         g_string_append_printf(buf, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
1171dbc6ae9cSTed Chen                                " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n",
1172dbc6ae9cSTed Chen                                block->idstr, psize,
1173be9b23c4SPeter Xu                                (uint64_t)block->offset,
1174be9b23c4SPeter Xu                                (uint64_t)block->used_length,
1175dbc6ae9cSTed Chen                                (uint64_t)block->max_length,
1176dbc6ae9cSTed Chen                                (uint64_t)(uintptr_t)block->host,
1177dbc6ae9cSTed Chen                                block->mr->readonly ? "ro" : "rw");
1178dbc6ae9cSTed Chen 
1179be9b23c4SPeter Xu         g_free(psize);
1180be9b23c4SPeter Xu     }
1181ca411b7cSDaniel P. Berrangé 
1182ca411b7cSDaniel P. Berrangé     return buf;
1183be9b23c4SPeter Xu }
1184be9b23c4SPeter Xu 
1185905b7ee4SDavid Hildenbrand static int find_min_backend_pagesize(Object *obj, void *opaque)
11869c607668SAlexey Kardashevskiy {
11879c607668SAlexey Kardashevskiy     long *hpsize_min = opaque;
11889c607668SAlexey Kardashevskiy 
11899c607668SAlexey Kardashevskiy     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
11907d5489e6SDavid Gibson         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
11917d5489e6SDavid Gibson         long hpsize = host_memory_backend_pagesize(backend);
11922b108085SDavid Gibson 
11937d5489e6SDavid Gibson         if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
11949c607668SAlexey Kardashevskiy             *hpsize_min = hpsize;
11959c607668SAlexey Kardashevskiy         }
11969c607668SAlexey Kardashevskiy     }
11979c607668SAlexey Kardashevskiy 
11989c607668SAlexey Kardashevskiy     return 0;
11999c607668SAlexey Kardashevskiy }
12009c607668SAlexey Kardashevskiy 
1201905b7ee4SDavid Hildenbrand static int find_max_backend_pagesize(Object *obj, void *opaque)
1202905b7ee4SDavid Hildenbrand {
1203905b7ee4SDavid Hildenbrand     long *hpsize_max = opaque;
1204905b7ee4SDavid Hildenbrand 
1205905b7ee4SDavid Hildenbrand     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1206905b7ee4SDavid Hildenbrand         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1207905b7ee4SDavid Hildenbrand         long hpsize = host_memory_backend_pagesize(backend);
1208905b7ee4SDavid Hildenbrand 
1209905b7ee4SDavid Hildenbrand         if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
1210905b7ee4SDavid Hildenbrand             *hpsize_max = hpsize;
1211905b7ee4SDavid Hildenbrand         }
1212905b7ee4SDavid Hildenbrand     }
1213905b7ee4SDavid Hildenbrand 
1214905b7ee4SDavid Hildenbrand     return 0;
1215905b7ee4SDavid Hildenbrand }
1216905b7ee4SDavid Hildenbrand 
1217905b7ee4SDavid Hildenbrand /*
1218905b7ee4SDavid Hildenbrand  * TODO: We currently assume that all mapped host memory backends are
1219905b7ee4SDavid Hildenbrand  * used as RAM; however, some might be used for different purposes.
1220905b7ee4SDavid Hildenbrand  */
1221905b7ee4SDavid Hildenbrand long qemu_minrampagesize(void)
12229c607668SAlexey Kardashevskiy {
12239c607668SAlexey Kardashevskiy     long hpsize = LONG_MAX;
1224ad1172d8SIgor Mammedov     Object *memdev_root = object_resolve_path("/objects", NULL);
12259c607668SAlexey Kardashevskiy 
1226905b7ee4SDavid Hildenbrand     object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
12279c607668SAlexey Kardashevskiy     return hpsize;
12289c607668SAlexey Kardashevskiy }
1229905b7ee4SDavid Hildenbrand 
1230905b7ee4SDavid Hildenbrand long qemu_maxrampagesize(void)
1231905b7ee4SDavid Hildenbrand {
1232ad1172d8SIgor Mammedov     long pagesize = 0;
1233905b7ee4SDavid Hildenbrand     Object *memdev_root = object_resolve_path("/objects", NULL);
1234905b7ee4SDavid Hildenbrand 
1235ad1172d8SIgor Mammedov     object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
1236905b7ee4SDavid Hildenbrand     return pagesize;
1237905b7ee4SDavid Hildenbrand }
12389c607668SAlexey Kardashevskiy 
1239d5dbde46SHikaru Nishida #ifdef CONFIG_POSIX
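/*
 * Return the size in bytes of the file backing @fd, or -errno on
 * failure.  The size is normally probed with lseek(SEEK_END); devdax
 * character devices are special-cased because they only expose their
 * size through sysfs.
 */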
1240d6af99c9SHaozhong Zhang static int64_t get_file_size(int fd)
1241d6af99c9SHaozhong Zhang {
124272d41eb4SStefan Hajnoczi     int64_t size;
124372d41eb4SStefan Hajnoczi #if defined(__linux__)
124472d41eb4SStefan Hajnoczi     struct stat st;
124572d41eb4SStefan Hajnoczi 
124672d41eb4SStefan Hajnoczi     if (fstat(fd, &st) < 0) {
124772d41eb4SStefan Hajnoczi         return -errno;
124872d41eb4SStefan Hajnoczi     }
124972d41eb4SStefan Hajnoczi 
125072d41eb4SStefan Hajnoczi     /* Special handling for devdax character devices */
125172d41eb4SStefan Hajnoczi     if (S_ISCHR(st.st_mode)) {
125272d41eb4SStefan Hajnoczi         g_autofree char *subsystem_path = NULL;
125372d41eb4SStefan Hajnoczi         g_autofree char *subsystem = NULL;
125472d41eb4SStefan Hajnoczi 
125572d41eb4SStefan Hajnoczi         subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
125672d41eb4SStefan Hajnoczi                                          major(st.st_rdev), minor(st.st_rdev));
125772d41eb4SStefan Hajnoczi         subsystem = g_file_read_link(subsystem_path, NULL);
125872d41eb4SStefan Hajnoczi 
125972d41eb4SStefan Hajnoczi         if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
126072d41eb4SStefan Hajnoczi             g_autofree char *size_path = NULL;
126172d41eb4SStefan Hajnoczi             g_autofree char *size_str = NULL;
126272d41eb4SStefan Hajnoczi 
126372d41eb4SStefan Hajnoczi             size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
126472d41eb4SStefan Hajnoczi                                     major(st.st_rdev), minor(st.st_rdev));
126572d41eb4SStefan Hajnoczi 
126672d41eb4SStefan Hajnoczi             if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
126772d41eb4SStefan Hajnoczi                 return g_ascii_strtoll(size_str, NULL, 0);
126872d41eb4SStefan Hajnoczi             }
126972d41eb4SStefan Hajnoczi         }
127072d41eb4SStefan Hajnoczi     }
127172d41eb4SStefan Hajnoczi #endif /* defined(__linux__) */
127272d41eb4SStefan Hajnoczi 
127372d41eb4SStefan Hajnoczi     /* st.st_size may be zero for special files, yet lseek(2) works */
127472d41eb4SStefan Hajnoczi     size = lseek(fd, 0, SEEK_END);
1275d6af99c9SHaozhong Zhang     if (size < 0) {
1276d6af99c9SHaozhong Zhang         return -errno;
1277d6af99c9SHaozhong Zhang     }
1278d6af99c9SHaozhong Zhang     return size;
1279d6af99c9SHaozhong Zhang }
1280d6af99c9SHaozhong Zhang 
1281ce317be9SJingqi Liu static int64_t get_file_align(int fd)
1282ce317be9SJingqi Liu {
1283ce317be9SJingqi Liu     int64_t align = -1;
1284ce317be9SJingqi Liu #if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1285ce317be9SJingqi Liu     struct stat st;
1286ce317be9SJingqi Liu 
1287ce317be9SJingqi Liu     if (fstat(fd, &st) < 0) {
1288ce317be9SJingqi Liu         return -errno;
1289ce317be9SJingqi Liu     }
1290ce317be9SJingqi Liu 
1291ce317be9SJingqi Liu     /* Special handling for devdax character devices */
1292ce317be9SJingqi Liu     if (S_ISCHR(st.st_mode)) {
1293ce317be9SJingqi Liu         g_autofree char *path = NULL;
1294ce317be9SJingqi Liu         g_autofree char *rpath = NULL;
1295ce317be9SJingqi Liu         struct daxctl_ctx *ctx;
1296ce317be9SJingqi Liu         struct daxctl_region *region;
1297ce317be9SJingqi Liu         int rc = 0;
1298ce317be9SJingqi Liu 
1299ce317be9SJingqi Liu         path = g_strdup_printf("/sys/dev/char/%d:%d",
1300ce317be9SJingqi Liu                     major(st.st_rdev), minor(st.st_rdev));
1301ce317be9SJingqi Liu         rpath = realpath(path, NULL);
13028efdb7baSPeter Maydell         if (!rpath) {
13038efdb7baSPeter Maydell             return -errno;
13048efdb7baSPeter Maydell         }
1305ce317be9SJingqi Liu 
1306ce317be9SJingqi Liu         rc = daxctl_new(&ctx);
1307ce317be9SJingqi Liu         if (rc) {
1308ce317be9SJingqi Liu             return -1;
1309ce317be9SJingqi Liu         }
1310ce317be9SJingqi Liu 
1311ce317be9SJingqi Liu         daxctl_region_foreach(ctx, region) {
1312ce317be9SJingqi Liu             if (strstr(rpath, daxctl_region_get_path(region))) {
1313ce317be9SJingqi Liu                 align = daxctl_region_get_align(region);
1314ce317be9SJingqi Liu                 break;
1315ce317be9SJingqi Liu             }
1316ce317be9SJingqi Liu         }
1317ce317be9SJingqi Liu         daxctl_unref(ctx);
1318ce317be9SJingqi Liu     }
1319ce317be9SJingqi Liu #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
1320ce317be9SJingqi Liu 
1321ce317be9SJingqi Liu     return align;
1322ce317be9SJingqi Liu }
1323ce317be9SJingqi Liu 
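/*
 * Open (or create) the backing file for a RAM block.  @path may name
 * an existing file, a file to be created, or a directory in which an
 * unlinked temporary file is created.  Returns an open fd on success
 * or a negative errno; *created tells the caller whether a new file
 * was created (e.g. so it can be removed again on a later error path).
 */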
13248d37b030SMarc-André Lureau static int file_ram_open(const char *path,
13258d37b030SMarc-André Lureau                          const char *region_name,
1326369d6dc4SStefan Hajnoczi                          bool readonly,
13274d6b23f7SDavid Hildenbrand                          bool *created)
1328c902760fSMarcelo Tosatti {
1329c902760fSMarcelo Tosatti     char *filename;
13308ca761f6SPeter Feiner     char *sanitized_name;
13318ca761f6SPeter Feiner     char *c;
13325c3ece79SPaolo Bonzini     int fd = -1;
1333c902760fSMarcelo Tosatti 
13348d37b030SMarc-André Lureau     *created = false;
1335fd97fd44SMarkus Armbruster     for (;;) {
1336369d6dc4SStefan Hajnoczi         fd = open(path, readonly ? O_RDONLY : O_RDWR);
1337fd97fd44SMarkus Armbruster         if (fd >= 0) {
1338ca01f1b8SDavid Hildenbrand             /*
1339ca01f1b8SDavid Hildenbrand              * open(O_RDONLY) won't fail with EISDIR. Check manually if we
1340ca01f1b8SDavid Hildenbrand              * opened a directory and fail similarly to how we fail ENOENT
1341ca01f1b8SDavid Hildenbrand              * in readonly mode. Note that mkstemp() would imply O_RDWR.
1342ca01f1b8SDavid Hildenbrand              */
1343ca01f1b8SDavid Hildenbrand             if (readonly) {
1344ca01f1b8SDavid Hildenbrand                 struct stat file_stat;
1345ca01f1b8SDavid Hildenbrand 
1346ca01f1b8SDavid Hildenbrand                 if (fstat(fd, &file_stat)) {
1347ca01f1b8SDavid Hildenbrand                     close(fd);
1348ca01f1b8SDavid Hildenbrand                     if (errno == EINTR) {
1349ca01f1b8SDavid Hildenbrand                         continue;
1350ca01f1b8SDavid Hildenbrand                     }
1351ca01f1b8SDavid Hildenbrand                     return -errno;
1352ca01f1b8SDavid Hildenbrand                 } else if (S_ISDIR(file_stat.st_mode)) {
1353ca01f1b8SDavid Hildenbrand                     close(fd);
1354ca01f1b8SDavid Hildenbrand                     return -EISDIR;
1355ca01f1b8SDavid Hildenbrand                 }
1356ca01f1b8SDavid Hildenbrand             }
1357fd97fd44SMarkus Armbruster             /* @path names an existing file, use it */
1358fd97fd44SMarkus Armbruster             break;
1359fd97fd44SMarkus Armbruster         }
1360fd97fd44SMarkus Armbruster         if (errno == ENOENT) {
13614d6b23f7SDavid Hildenbrand             if (readonly) {
13624d6b23f7SDavid Hildenbrand                 /* Refuse to create new, readonly files. */
13634d6b23f7SDavid Hildenbrand                 return -ENOENT;
13644d6b23f7SDavid Hildenbrand             }
1365fd97fd44SMarkus Armbruster             /* @path names a file that doesn't exist, create it */
1366fd97fd44SMarkus Armbruster             fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1367fd97fd44SMarkus Armbruster             if (fd >= 0) {
13688d37b030SMarc-André Lureau                 *created = true;
1369fd97fd44SMarkus Armbruster                 break;
1370fd97fd44SMarkus Armbruster             }
1371fd97fd44SMarkus Armbruster         } else if (errno == EISDIR) {
1372fd97fd44SMarkus Armbruster             /* @path names a directory, create a file there */
13738ca761f6SPeter Feiner             /* Make name safe to use with mkstemp by replacing '/' with '_'. */
13748d37b030SMarc-André Lureau             sanitized_name = g_strdup(region_name);
13758ca761f6SPeter Feiner             for (c = sanitized_name; *c != '\0'; c++) {
13768d31d6b6SPavel Fedin                 if (*c == '/') {
13778ca761f6SPeter Feiner                     *c = '_';
13788ca761f6SPeter Feiner                 }
13798d31d6b6SPavel Fedin             }
13808ca761f6SPeter Feiner 
13818ca761f6SPeter Feiner             filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
13828ca761f6SPeter Feiner                                        sanitized_name);
13838ca761f6SPeter Feiner             g_free(sanitized_name);
1384c902760fSMarcelo Tosatti 
1385c902760fSMarcelo Tosatti             fd = mkstemp(filename);
13868d31d6b6SPavel Fedin             if (fd >= 0) {
13878d31d6b6SPavel Fedin                 unlink(filename);
1388fd97fd44SMarkus Armbruster                 g_free(filename);
1389fd97fd44SMarkus Armbruster                 break;
13908d31d6b6SPavel Fedin             }
13918d31d6b6SPavel Fedin             g_free(filename);
1392fd97fd44SMarkus Armbruster         }
1393fd97fd44SMarkus Armbruster         if (errno != EEXIST && errno != EINTR) {
13944d6b23f7SDavid Hildenbrand             return -errno;
1395fd97fd44SMarkus Armbruster         }
1396fd97fd44SMarkus Armbruster         /*
1397fd97fd44SMarkus Armbruster          * Try again on EINTR and EEXIST.  The latter happens when
1398fd97fd44SMarkus Armbruster          * something else creates the file between our two open().
1399fd97fd44SMarkus Armbruster          */
14008d31d6b6SPavel Fedin     }
14018d31d6b6SPavel Fedin 
14028d37b030SMarc-André Lureau     return fd;
14038d37b030SMarc-André Lureau }
14048d37b030SMarc-André Lureau 
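/*
 * mmap() the backing file into the block.  Alignment and offset are
 * validated against the file's page size (for hugetlbfs this is the
 * huge page size), the size is rounded up to a whole number of pages,
 * and the QEMU_MAP_* flags are derived from the block's RAM_* flags.
 */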
14058d37b030SMarc-André Lureau static void *file_ram_alloc(RAMBlock *block,
14068d37b030SMarc-André Lureau                             ram_addr_t memory,
14078d37b030SMarc-André Lureau                             int fd,
14088d37b030SMarc-André Lureau                             bool truncate,
140944a4ff31SJagannathan Raman                             off_t offset,
14108d37b030SMarc-André Lureau                             Error **errp)
14118d37b030SMarc-André Lureau {
1412b444f5c0SDavid Hildenbrand     uint32_t qemu_map_flags;
14138d37b030SMarc-André Lureau     void *area;
14148d37b030SMarc-André Lureau 
1415863e9621SDr. David Alan Gilbert     block->page_size = qemu_fd_getpagesize(fd);
141698376843SHaozhong Zhang     if (block->mr->align % block->page_size) {
141798376843SHaozhong Zhang         error_setg(errp, "alignment 0x%" PRIx64
141898376843SHaozhong Zhang                    " must be a multiple of page size 0x%zx",
141998376843SHaozhong Zhang                    block->mr->align, block->page_size);
142098376843SHaozhong Zhang         return NULL;
142161362b71SDavid Hildenbrand     } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
142261362b71SDavid Hildenbrand         error_setg(errp, "alignment 0x%" PRIx64
142361362b71SDavid Hildenbrand                    " must be a power of two", block->mr->align);
142461362b71SDavid Hildenbrand         return NULL;
14254b870dc4SAlexander Graf     } else if (offset % block->page_size) {
14264b870dc4SAlexander Graf         error_setg(errp, "offset 0x%" PRIx64
14274b870dc4SAlexander Graf                    " must be a multiple of page size 0x%zx",
14284b870dc4SAlexander Graf                    offset, block->page_size);
14294b870dc4SAlexander Graf         return NULL;
143098376843SHaozhong Zhang     }
143198376843SHaozhong Zhang     block->mr->align = MAX(block->page_size, block->mr->align);
14328360668eSHaozhong Zhang #if defined(__s390x__)
14338360668eSHaozhong Zhang     if (kvm_enabled()) {
14348360668eSHaozhong Zhang         block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
14358360668eSHaozhong Zhang     }
14368360668eSHaozhong Zhang #endif
1437fd97fd44SMarkus Armbruster 
1438863e9621SDr. David Alan Gilbert     if (memory < block->page_size) {
1439fd97fd44SMarkus Armbruster         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1440863e9621SDr. David Alan Gilbert                    "or larger than page size 0x%zx",
1441863e9621SDr. David Alan Gilbert                    memory, block->page_size);
14428d37b030SMarc-André Lureau         return NULL;
14431775f111SHaozhong Zhang     }
14441775f111SHaozhong Zhang 
1445863e9621SDr. David Alan Gilbert     memory = ROUND_UP(memory, block->page_size);
1446c902760fSMarcelo Tosatti 
1447c902760fSMarcelo Tosatti     /*
1448c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
1449c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1450c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1451c902760fSMarcelo Tosatti      * mmap will fail.
1452d6af99c9SHaozhong Zhang      *
1453d6af99c9SHaozhong Zhang      * Do not truncate the non-empty backend file to avoid corrupting
1454d6af99c9SHaozhong Zhang      * the existing data in the file. Disabling shrinking is not
1455d6af99c9SHaozhong Zhang      * enough. For example, the current vNVDIMM implementation stores
1456d6af99c9SHaozhong Zhang      * the guest NVDIMM labels at the end of the backend file. If the
1457d6af99c9SHaozhong Zhang      * backend file is later extended, QEMU will not be able to find
1458d6af99c9SHaozhong Zhang      * those labels. Therefore, extending the non-empty backend file
1459d6af99c9SHaozhong Zhang      * is disabled as well.
1460c902760fSMarcelo Tosatti      */
14614b870dc4SAlexander Graf     if (truncate && ftruncate(fd, offset + memory)) {
1462c902760fSMarcelo Tosatti         perror("ftruncate");
14637f56e740SPaolo Bonzini     }
1464c902760fSMarcelo Tosatti 
14655c52a219SDavid Hildenbrand     qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
1466b444f5c0SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
1467b444f5c0SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
14688dbe22c6SDavid Hildenbrand     qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
1469b444f5c0SDavid Hildenbrand     area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
1470c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
14717f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
1472fd97fd44SMarkus Armbruster                          "unable to map backing store for guest RAM");
14738d37b030SMarc-André Lureau         return NULL;
1474c902760fSMarcelo Tosatti     }
1475ef36fa14SMarcelo Tosatti 
147604b16653SAlex Williamson     block->fd = fd;
14774b870dc4SAlexander Graf     block->fd_offset = offset;
1478c902760fSMarcelo Tosatti     return area;
1479c902760fSMarcelo Tosatti }
1480c902760fSMarcelo Tosatti #endif
1481c902760fSMarcelo Tosatti 
1482154cc9eaSDr. David Alan Gilbert /* Allocate space within the ram_addr_t space that governs the
1483154cc9eaSDr. David Alan Gilbert  * dirty bitmaps.
1484154cc9eaSDr. David Alan Gilbert  * Called with the ramlist lock held.
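 *
 * This is a best-fit search: every gap between existing blocks is
 * considered and the smallest gap that is still large enough wins,
 * which keeps fragmentation of the ram_addr_t space down.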
1485154cc9eaSDr. David Alan Gilbert  */
1486d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1487d17b5288SAlex Williamson {
148804b16653SAlex Williamson     RAMBlock *block, *next_block;
14893e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
149004b16653SAlex Williamson 
149149cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
149249cd9ac6SStefan Hajnoczi 
14930dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
149404b16653SAlex Williamson         return 0;
14950d53d9feSMike Day     }
149604b16653SAlex Williamson 
149799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
1498154cc9eaSDr. David Alan Gilbert         ram_addr_t candidate, next = RAM_ADDR_MAX;
149904b16653SAlex Williamson 
1500801110abSDr. David Alan Gilbert         /* Align blocks to start on a 'long' in the bitmap,
1501801110abSDr. David Alan Gilbert          * which makes the bitmap syncing take the fast path.
1502801110abSDr. David Alan Gilbert          */
1503154cc9eaSDr. David Alan Gilbert         candidate = block->offset + block->max_length;
1504801110abSDr. David Alan Gilbert         candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
150504b16653SAlex Williamson 
1506154cc9eaSDr. David Alan Gilbert         /* Search for the closest following block
1507154cc9eaSDr. David Alan Gilbert          * and find the gap.
1508154cc9eaSDr. David Alan Gilbert          */
150999e15582SPeter Xu         RAMBLOCK_FOREACH(next_block) {
1510154cc9eaSDr. David Alan Gilbert             if (next_block->offset >= candidate) {
151104b16653SAlex Williamson                 next = MIN(next, next_block->offset);
151204b16653SAlex Williamson             }
151304b16653SAlex Williamson         }
1514154cc9eaSDr. David Alan Gilbert 
1515154cc9eaSDr. David Alan Gilbert         /* If it fits, remember our place and remember the size
1516154cc9eaSDr. David Alan Gilbert          * of the gap, but keep going so that we might find a smaller
1517154cc9eaSDr. David Alan Gilbert          * gap to fill, thus avoiding fragmentation.
1518154cc9eaSDr. David Alan Gilbert          */
1519154cc9eaSDr. David Alan Gilbert         if (next - candidate >= size && next - candidate < mingap) {
1520154cc9eaSDr. David Alan Gilbert             offset = candidate;
1521154cc9eaSDr. David Alan Gilbert             mingap = next - candidate;
152204b16653SAlex Williamson         }
1523154cc9eaSDr. David Alan Gilbert 
1524154cc9eaSDr. David Alan Gilbert         trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
152504b16653SAlex Williamson     }
15263e837b2cSAlex Williamson 
15273e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
15283e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
15293e837b2cSAlex Williamson                 (uint64_t)size);
15303e837b2cSAlex Williamson         abort();
15313e837b2cSAlex Williamson     }
15323e837b2cSAlex Williamson 
1533154cc9eaSDr. David Alan Gilbert     trace_find_ram_offset(size, offset);
1534154cc9eaSDr. David Alan Gilbert 
153504b16653SAlex Williamson     return offset;
153604b16653SAlex Williamson }
153704b16653SAlex Williamson 
1538ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1539ddb97f1dSJason Baron {
1540ddb97f1dSJason Baron     int ret;
1541ddb97f1dSJason Baron 
1542ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
154347c8ca53SMarcel Apfelbaum     if (!machine_dump_guest_core(current_machine)) {
1544ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1545ddb97f1dSJason Baron         if (ret) {
1546ddb97f1dSJason Baron             perror("qemu_madvise");
1547ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
15480ff3243aSAkihiko Odaki                             "but dump-guest-core=off specified\n");
1549ddb97f1dSJason Baron         }
1550ddb97f1dSJason Baron     }
1551ddb97f1dSJason Baron }
1552ddb97f1dSJason Baron 
1553422148d3SDr. David Alan Gilbert const char *qemu_ram_get_idstr(RAMBlock *rb)
1554422148d3SDr. David Alan Gilbert {
1555422148d3SDr. David Alan Gilbert     return rb->idstr;
1556422148d3SDr. David Alan Gilbert }
1557422148d3SDr. David Alan Gilbert 
1558754cb9c0SYury Kotov void *qemu_ram_get_host_addr(RAMBlock *rb)
1559754cb9c0SYury Kotov {
1560754cb9c0SYury Kotov     return rb->host;
1561754cb9c0SYury Kotov }
1562754cb9c0SYury Kotov 
1563754cb9c0SYury Kotov ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1564754cb9c0SYury Kotov {
1565754cb9c0SYury Kotov     return rb->offset;
1566754cb9c0SYury Kotov }
1567754cb9c0SYury Kotov 
1568754cb9c0SYury Kotov ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1569754cb9c0SYury Kotov {
1570754cb9c0SYury Kotov     return rb->used_length;
1571754cb9c0SYury Kotov }
1572754cb9c0SYury Kotov 
1573082851a3SDavid Hildenbrand ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
1574082851a3SDavid Hildenbrand {
1575082851a3SDavid Hildenbrand     return rb->max_length;
1576082851a3SDavid Hildenbrand }
1577082851a3SDavid Hildenbrand 
1578463a4ac2SDr. David Alan Gilbert bool qemu_ram_is_shared(RAMBlock *rb)
1579463a4ac2SDr. David Alan Gilbert {
1580463a4ac2SDr. David Alan Gilbert     return rb->flags & RAM_SHARED;
1581463a4ac2SDr. David Alan Gilbert }
1582463a4ac2SDr. David Alan Gilbert 
15838dbe22c6SDavid Hildenbrand bool qemu_ram_is_noreserve(RAMBlock *rb)
15848dbe22c6SDavid Hildenbrand {
15858dbe22c6SDavid Hildenbrand     return rb->flags & RAM_NORESERVE;
15868dbe22c6SDavid Hildenbrand }
15878dbe22c6SDavid Hildenbrand 
15882ce16640SDr. David Alan Gilbert /* Note: Only set at the start of postcopy */
15892ce16640SDr. David Alan Gilbert bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
15902ce16640SDr. David Alan Gilbert {
15912ce16640SDr. David Alan Gilbert     return rb->flags & RAM_UF_ZEROPAGE;
15922ce16640SDr. David Alan Gilbert }
15932ce16640SDr. David Alan Gilbert 
15942ce16640SDr. David Alan Gilbert void qemu_ram_set_uf_zeroable(RAMBlock *rb)
15952ce16640SDr. David Alan Gilbert {
15962ce16640SDr. David Alan Gilbert     rb->flags |= RAM_UF_ZEROPAGE;
15972ce16640SDr. David Alan Gilbert }
15982ce16640SDr. David Alan Gilbert 
1599b895de50SCédric Le Goater bool qemu_ram_is_migratable(RAMBlock *rb)
1600b895de50SCédric Le Goater {
1601b895de50SCédric Le Goater     return rb->flags & RAM_MIGRATABLE;
1602b895de50SCédric Le Goater }
1603b895de50SCédric Le Goater 
1604b895de50SCédric Le Goater void qemu_ram_set_migratable(RAMBlock *rb)
1605b895de50SCédric Le Goater {
1606b895de50SCédric Le Goater     rb->flags |= RAM_MIGRATABLE;
1607b895de50SCédric Le Goater }
1608b895de50SCédric Le Goater 
1609b895de50SCédric Le Goater void qemu_ram_unset_migratable(RAMBlock *rb)
1610b895de50SCédric Le Goater {
1611b895de50SCédric Le Goater     rb->flags &= ~RAM_MIGRATABLE;
1612b895de50SCédric Le Goater }
1613b895de50SCédric Le Goater 
1614b0182e53SSteve Sistare bool qemu_ram_is_named_file(RAMBlock *rb)
1615b0182e53SSteve Sistare {
1616b0182e53SSteve Sistare     return rb->flags & RAM_NAMED_FILE;
1617b0182e53SSteve Sistare }
1618b0182e53SSteve Sistare 
16196d998f3cSStefan Hajnoczi int qemu_ram_get_fd(RAMBlock *rb)
16206d998f3cSStefan Hajnoczi {
16216d998f3cSStefan Hajnoczi     return rb->fd;
16226d998f3cSStefan Hajnoczi }
16236d998f3cSStefan Hajnoczi 
1624a4a411fbSStefan Hajnoczi /* Called with the BQL held.  */
1625fa53a0e5SGonglei void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
162620cfe881SHu Tao {
1627fa53a0e5SGonglei     RAMBlock *block;
162820cfe881SHu Tao 
1629c5705a77SAvi Kivity     assert(new_block);
1630c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
163184b89d78SCam Macdonell 
163209e5ab63SAnthony Liguori     if (dev) {
163309e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
163484b89d78SCam Macdonell         if (id) {
163584b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
16367267c094SAnthony Liguori             g_free(id);
163784b89d78SCam Macdonell         }
163884b89d78SCam Macdonell     }
163984b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
164084b89d78SCam Macdonell 
1641694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
164299e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
1643fa53a0e5SGonglei         if (block != new_block &&
1644fa53a0e5SGonglei             !strcmp(block->idstr, new_block->idstr)) {
164584b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
164684b89d78SCam Macdonell                     new_block->idstr);
164784b89d78SCam Macdonell             abort();
164884b89d78SCam Macdonell         }
164984b89d78SCam Macdonell     }
1650c5705a77SAvi Kivity }
1651c5705a77SAvi Kivity 
1652a4a411fbSStefan Hajnoczi /* Called with the BQL held.  */
1653fa53a0e5SGonglei void qemu_ram_unset_idstr(RAMBlock *block)
165420cfe881SHu Tao {
1655ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1656ae3a7047SMike Day      * migration.  Ignore the problem since hot-unplug during migration
1657ae3a7047SMike Day      * does not work anyway.
1658ae3a7047SMike Day      */
165920cfe881SHu Tao     if (block) {
166020cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
166120cfe881SHu Tao     }
166220cfe881SHu Tao }
166320cfe881SHu Tao 
1664863e9621SDr. David Alan Gilbert size_t qemu_ram_pagesize(RAMBlock *rb)
1665863e9621SDr. David Alan Gilbert {
1666863e9621SDr. David Alan Gilbert     return rb->page_size;
1667863e9621SDr. David Alan Gilbert }
1668863e9621SDr. David Alan Gilbert 
166967f11b5cSDr. David Alan Gilbert /* Returns the largest size of page in use */
167067f11b5cSDr. David Alan Gilbert size_t qemu_ram_pagesize_largest(void)
167167f11b5cSDr. David Alan Gilbert {
167267f11b5cSDr. David Alan Gilbert     RAMBlock *block;
167367f11b5cSDr. David Alan Gilbert     size_t largest = 0;
167467f11b5cSDr. David Alan Gilbert 
167599e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
167667f11b5cSDr. David Alan Gilbert         largest = MAX(largest, qemu_ram_pagesize(block));
167767f11b5cSDr. David Alan Gilbert     }
167867f11b5cSDr. David Alan Gilbert 
167967f11b5cSDr. David Alan Gilbert     return largest;
168067f11b5cSDr. David Alan Gilbert }
168167f11b5cSDr. David Alan Gilbert 
16828490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
16838490fc78SLuiz Capitulino {
168475cc7f01SMarcel Apfelbaum     if (!machine_mem_merge(current_machine)) {
16858490fc78SLuiz Capitulino         /* disabled by the user */
16868490fc78SLuiz Capitulino         return 0;
16878490fc78SLuiz Capitulino     }
16888490fc78SLuiz Capitulino 
16898490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
16908490fc78SLuiz Capitulino }
16918490fc78SLuiz Capitulino 
1692c7c0e724SDavid Hildenbrand /*
1693c7c0e724SDavid Hildenbrand  * Resizing RAM while migrating can result in the migration being canceled.
1694c7c0e724SDavid Hildenbrand  * Care has to be taken if the guest might have already detected the memory.
169562be4e3aSMichael S. Tsirkin  *
169662be4e3aSMichael S. Tsirkin  * As the memory core doesn't know how the memory is accessed, it is up to
169762be4e3aSMichael S. Tsirkin  * the resize callback to update device state and/or add assertions to detect
169862be4e3aSMichael S. Tsirkin  * misuse, if necessary.
169962be4e3aSMichael S. Tsirkin  */
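/*
 * Note that the new size is aligned up to both the target page size
 * and the host page size before it is compared with the current size;
 * a resize within the same aligned size only updates the unaligned
 * size recorded in the MemoryRegion and notifies the resize callback,
 * if any.
 */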
1700fa53a0e5SGonglei int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
170162be4e3aSMichael S. Tsirkin {
17028f44304cSDavid Hildenbrand     const ram_addr_t oldsize = block->used_length;
1703ce4adc0bSDavid Hildenbrand     const ram_addr_t unaligned_size = newsize;
1704ce4adc0bSDavid Hildenbrand 
170562be4e3aSMichael S. Tsirkin     assert(block);
170662be4e3aSMichael S. Tsirkin 
17079260bd40SRichard Henderson     newsize = TARGET_PAGE_ALIGN(newsize);
17089260bd40SRichard Henderson     newsize = REAL_HOST_PAGE_ALIGN(newsize);
1709129ddaf3SMichael S. Tsirkin 
171062be4e3aSMichael S. Tsirkin     if (block->used_length == newsize) {
1711ce4adc0bSDavid Hildenbrand         /*
1712ce4adc0bSDavid Hildenbrand          * We don't have to resize the ram block (which only knows aligned
1713ce4adc0bSDavid Hildenbrand          * sizes), however, we have to notify if the unaligned size changed.
1714ce4adc0bSDavid Hildenbrand          */
1715ce4adc0bSDavid Hildenbrand         if (unaligned_size != memory_region_size(block->mr)) {
1716ce4adc0bSDavid Hildenbrand             memory_region_set_size(block->mr, unaligned_size);
1717ce4adc0bSDavid Hildenbrand             if (block->resized) {
1718ce4adc0bSDavid Hildenbrand                 block->resized(block->idstr, unaligned_size, block->host);
1719ce4adc0bSDavid Hildenbrand             }
1720ce4adc0bSDavid Hildenbrand         }
172162be4e3aSMichael S. Tsirkin         return 0;
172262be4e3aSMichael S. Tsirkin     }
172362be4e3aSMichael S. Tsirkin 
172462be4e3aSMichael S. Tsirkin     if (!(block->flags & RAM_RESIZEABLE)) {
172562be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
1726a3a92908SPankaj Gupta                          "Size mismatch: %s: 0x" RAM_ADDR_FMT
1727a3a92908SPankaj Gupta                          " != 0x" RAM_ADDR_FMT, block->idstr,
172862be4e3aSMichael S. Tsirkin                          newsize, block->used_length);
172962be4e3aSMichael S. Tsirkin         return -EINVAL;
173062be4e3aSMichael S. Tsirkin     }
173162be4e3aSMichael S. Tsirkin 
173262be4e3aSMichael S. Tsirkin     if (block->max_length < newsize) {
173362be4e3aSMichael S. Tsirkin         error_setg_errno(errp, EINVAL,
1734a3a92908SPankaj Gupta                          "Size too large: %s: 0x" RAM_ADDR_FMT
173562be4e3aSMichael S. Tsirkin                          " > 0x" RAM_ADDR_FMT, block->idstr,
173662be4e3aSMichael S. Tsirkin                          newsize, block->max_length);
173762be4e3aSMichael S. Tsirkin         return -EINVAL;
173862be4e3aSMichael S. Tsirkin     }
173962be4e3aSMichael S. Tsirkin 
17408f44304cSDavid Hildenbrand     /* Notify before modifying the ram block and touching the bitmaps. */
17418f44304cSDavid Hildenbrand     if (block->host) {
17428f44304cSDavid Hildenbrand         ram_block_notify_resize(block->host, oldsize, newsize);
17438f44304cSDavid Hildenbrand     }
17448f44304cSDavid Hildenbrand 
174562be4e3aSMichael S. Tsirkin     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
174662be4e3aSMichael S. Tsirkin     block->used_length = newsize;
174758d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
174858d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
1749ce4adc0bSDavid Hildenbrand     memory_region_set_size(block->mr, unaligned_size);
175062be4e3aSMichael S. Tsirkin     if (block->resized) {
1751ce4adc0bSDavid Hildenbrand         block->resized(block->idstr, unaligned_size, block->host);
175262be4e3aSMichael S. Tsirkin     }
175362be4e3aSMichael S. Tsirkin     return 0;
175462be4e3aSMichael S. Tsirkin }
175562be4e3aSMichael S. Tsirkin 
175661c490e2SBeata Michalska /*
175761c490e2SBeata Michalska  * Trigger sync on the given ram block for range [start, start + length)
175861c490e2SBeata Michalska  * with the backing store if one is available.
175961c490e2SBeata Michalska  * Otherwise no-op.
176061c490e2SBeata Michalska  * @Note: this is supposed to be a synchronous op.
176161c490e2SBeata Michalska  */
1762ab7e41e6SPhilippe Mathieu-Daudé void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
176361c490e2SBeata Michalska {
176461c490e2SBeata Michalska     /* The requested range should fit within the block range */
176561c490e2SBeata Michalska     g_assert((start + length) <= block->used_length);
176661c490e2SBeata Michalska 
176761c490e2SBeata Michalska #ifdef CONFIG_LIBPMEM
176861c490e2SBeata Michalska     /* The lack of support for pmem should not block the sync */
176961c490e2SBeata Michalska     if (ramblock_is_pmem(block)) {
17705d4c9549SAnthony PERARD         void *addr = ramblock_ptr(block, start);
177161c490e2SBeata Michalska         pmem_persist(addr, length);
177261c490e2SBeata Michalska         return;
177361c490e2SBeata Michalska     }
177461c490e2SBeata Michalska #endif
177561c490e2SBeata Michalska     if (block->fd >= 0) {
177661c490e2SBeata Michalska         /**
177761c490e2SBeata Michalska          * In case there is no support for PMEM, or the memory has not been
177861c490e2SBeata Michalska          * specified as persistent (or is not pmem), fall back to msync.
177961c490e2SBeata Michalska          * Less optimal, but it still achieves the same goal.
178061c490e2SBeata Michalska          */
17815d4c9549SAnthony PERARD         void *addr = ramblock_ptr(block, start);
178261c490e2SBeata Michalska         if (qemu_msync(addr, length, block->fd)) {
178361c490e2SBeata Michalska             warn_report("%s: failed to sync memory range: start: "
178461c490e2SBeata Michalska                     RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
178561c490e2SBeata Michalska                     __func__, start, length);
178661c490e2SBeata Michalska         }
178761c490e2SBeata Michalska     }
178861c490e2SBeata Michalska }
178961c490e2SBeata Michalska 
17905b82b703SStefan Hajnoczi /* Called with ram_list.mutex held */
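/*
 * The dirty bitmaps are extended RCU-style: a larger blocks array is
 * built, the existing bitmap pointers are copied across, the new array
 * is published with qatomic_rcu_set(), and the old one is reclaimed
 * with g_free_rcu() so that concurrent readers remain safe.
 */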
1791b84f06c2SDavid Hildenbrand static void dirty_memory_extend(ram_addr_t new_ram_size)
17925b82b703SStefan Hajnoczi {
1793b84f06c2SDavid Hildenbrand     unsigned int old_num_blocks = ram_list.num_dirty_blocks;
1794b84f06c2SDavid Hildenbrand     unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
17955b82b703SStefan Hajnoczi                                                DIRTY_MEMORY_BLOCK_SIZE);
17965b82b703SStefan Hajnoczi     int i;
17975b82b703SStefan Hajnoczi 
17985b82b703SStefan Hajnoczi     /* Only need to extend if block count increased */
17995b82b703SStefan Hajnoczi     if (new_num_blocks <= old_num_blocks) {
18005b82b703SStefan Hajnoczi         return;
18015b82b703SStefan Hajnoczi     }
18025b82b703SStefan Hajnoczi 
18035b82b703SStefan Hajnoczi     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
18045b82b703SStefan Hajnoczi         DirtyMemoryBlocks *old_blocks;
18055b82b703SStefan Hajnoczi         DirtyMemoryBlocks *new_blocks;
18065b82b703SStefan Hajnoczi         int j;
18075b82b703SStefan Hajnoczi 
1808d73415a3SStefan Hajnoczi         old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
18095b82b703SStefan Hajnoczi         new_blocks = g_malloc(sizeof(*new_blocks) +
18105b82b703SStefan Hajnoczi                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
18115b82b703SStefan Hajnoczi 
18125b82b703SStefan Hajnoczi         if (old_num_blocks) {
18135b82b703SStefan Hajnoczi             memcpy(new_blocks->blocks, old_blocks->blocks,
18145b82b703SStefan Hajnoczi                    old_num_blocks * sizeof(old_blocks->blocks[0]));
18155b82b703SStefan Hajnoczi         }
18165b82b703SStefan Hajnoczi 
18175b82b703SStefan Hajnoczi         for (j = old_num_blocks; j < new_num_blocks; j++) {
18185b82b703SStefan Hajnoczi             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
18195b82b703SStefan Hajnoczi         }
18205b82b703SStefan Hajnoczi 
1821d73415a3SStefan Hajnoczi         qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
18225b82b703SStefan Hajnoczi 
18235b82b703SStefan Hajnoczi         if (old_blocks) {
18245b82b703SStefan Hajnoczi             g_free_rcu(old_blocks, rcu);
18255b82b703SStefan Hajnoczi         }
18265b82b703SStefan Hajnoczi     }
1827b84f06c2SDavid Hildenbrand 
1828b84f06c2SDavid Hildenbrand     ram_list.num_dirty_blocks = new_num_blocks;
18295b82b703SStefan Hajnoczi }
18305b82b703SStefan Hajnoczi 
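/*
 * Complete the setup of a new RAM block: pick an offset in the
 * ram_addr_t space, allocate the host memory if the caller has not
 * (Xen, anonymous RAM, or guest_memfd backing), grow the dirty
 * bitmaps, insert the block into the RCU list sorted from biggest to
 * smallest, mark the whole range dirty, and apply the madvise() tweaks
 * (merging, dump exclusion, MADV_DONTFORK).
 */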
18317ce18ca0SDavid Hildenbrand static void ram_block_add(RAMBlock *new_block, Error **errp)
1832c5705a77SAvi Kivity {
18338dbe22c6SDavid Hildenbrand     const bool noreserve = qemu_ram_is_noreserve(new_block);
18347ce18ca0SDavid Hildenbrand     const bool shared = qemu_ram_is_shared(new_block);
1835e1c57ab8SPaolo Bonzini     RAMBlock *block;
18360d53d9feSMike Day     RAMBlock *last_block = NULL;
183715f7a80cSXiaoyao Li     bool free_on_error = false;
1838b84f06c2SDavid Hildenbrand     ram_addr_t ram_size;
183937aa7a0eSMarkus Armbruster     Error *err = NULL;
18402152f5caSJuan Quintela 
1841b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
18429b8424d5SMichael S. Tsirkin     new_block->offset = find_ram_offset(new_block->max_length);
1843e1c57ab8SPaolo Bonzini 
18440628c182SMarkus Armbruster     if (!new_block->host) {
1845e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
18469b8424d5SMichael S. Tsirkin             xen_ram_alloc(new_block->offset, new_block->max_length,
184737aa7a0eSMarkus Armbruster                           new_block->mr, &err);
184837aa7a0eSMarkus Armbruster             if (err) {
184937aa7a0eSMarkus Armbruster                 error_propagate(errp, err);
185037aa7a0eSMarkus Armbruster                 qemu_mutex_unlock_ramlist();
185139c350eeSPaolo Bonzini                 return;
185237aa7a0eSMarkus Armbruster             }
1853e1c57ab8SPaolo Bonzini         } else {
185425459eb7SDavid Hildenbrand             new_block->host = qemu_anon_ram_alloc(new_block->max_length,
185525459eb7SDavid Hildenbrand                                                   &new_block->mr->align,
18568dbe22c6SDavid Hildenbrand                                                   shared, noreserve);
185739228250SMarkus Armbruster             if (!new_block->host) {
1858ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1859ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1860ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1861ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
186239c350eeSPaolo Bonzini                 return;
186339228250SMarkus Armbruster             }
18649b8424d5SMichael S. Tsirkin             memory_try_enable_merging(new_block->host, new_block->max_length);
186515f7a80cSXiaoyao Li             free_on_error = true;
186615f7a80cSXiaoyao Li         }
186715f7a80cSXiaoyao Li     }
186815f7a80cSXiaoyao Li 
186915f7a80cSXiaoyao Li     if (new_block->flags & RAM_GUEST_MEMFD) {
1870644a5277SZhenzhong Duan         int ret;
1871644a5277SZhenzhong Duan 
187215f7a80cSXiaoyao Li         assert(kvm_enabled());
187315f7a80cSXiaoyao Li         assert(new_block->guest_memfd < 0);
187415f7a80cSXiaoyao Li 
1875644a5277SZhenzhong Duan         ret = ram_block_discard_require(true);
1876644a5277SZhenzhong Duan         if (ret < 0) {
1877644a5277SZhenzhong Duan             error_setg_errno(errp, -ret,
1878852f0048SPaolo Bonzini                              "cannot set up private guest memory: discard currently blocked");
1879852f0048SPaolo Bonzini             error_append_hint(errp, "Are you using assigned devices?\n");
1880852f0048SPaolo Bonzini             goto out_free;
1881852f0048SPaolo Bonzini         }
1882852f0048SPaolo Bonzini 
188315f7a80cSXiaoyao Li         new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
188415f7a80cSXiaoyao Li                                                         0, errp);
188515f7a80cSXiaoyao Li         if (new_block->guest_memfd < 0) {
188615f7a80cSXiaoyao Li             qemu_mutex_unlock_ramlist();
188715f7a80cSXiaoyao Li             goto out_free;
1888c902760fSMarcelo Tosatti         }
18896977dfe6SYoshiaki Tamura     }
189094a6b54fSpbrook 
1891b84f06c2SDavid Hildenbrand     ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
1892b84f06c2SDavid Hildenbrand     dirty_memory_extend(ram_size);
18930d53d9feSMike Day     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
18940d53d9feSMike Day      * QLIST (which has an RCU-friendly variant) does not have insertion at
18950d53d9feSMike Day      * tail, so save the last element in last_block.
18960d53d9feSMike Day      */
189799e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
18980d53d9feSMike Day         last_block = block;
18999b8424d5SMichael S. Tsirkin         if (block->max_length < new_block->max_length) {
1900abb26d63SPaolo Bonzini             break;
1901abb26d63SPaolo Bonzini         }
1902abb26d63SPaolo Bonzini     }
1903abb26d63SPaolo Bonzini     if (block) {
19040dc3f44aSMike Day         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
19050d53d9feSMike Day     } else if (last_block) {
19060dc3f44aSMike Day         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
19070d53d9feSMike Day     } else { /* list is empty */
19080dc3f44aSMike Day         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1909abb26d63SPaolo Bonzini     }
19100d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
191194a6b54fSpbrook 
19120dc3f44aSMike Day     /* Write list before version */
19130dc3f44aSMike Day     smp_wmb();
1914f798b07fSUmesh Deshpande     ram_list.version++;
1915b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1916f798b07fSUmesh Deshpande 
19179b8424d5SMichael S. Tsirkin     cpu_physical_memory_set_dirty_range(new_block->offset,
191858d2707eSPaolo Bonzini                                         new_block->used_length,
191958d2707eSPaolo Bonzini                                         DIRTY_CLIENTS_ALL);
192094a6b54fSpbrook 
1921a904c911SPaolo Bonzini     if (new_block->host) {
19229b8424d5SMichael S. Tsirkin         qemu_ram_setup_dump(new_block->host, new_block->max_length);
19239b8424d5SMichael S. Tsirkin         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1924a028edeaSAlexander Bulekov         /*
1925a028edeaSAlexander Bulekov          * MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU.
1926a028edeaSAlexander Bulekov          * Configure it unless the machine is a qtest server, in which case
1927a028edeaSAlexander Bulekov          * KVM is not used and it may be forked (e.g. for fuzzing purposes).
1928a028edeaSAlexander Bulekov          */
1929a028edeaSAlexander Bulekov         if (!qtest_enabled()) {
1930a028edeaSAlexander Bulekov             qemu_madvise(new_block->host, new_block->max_length,
1931a028edeaSAlexander Bulekov                          QEMU_MADV_DONTFORK);
1932a028edeaSAlexander Bulekov         }
19338f44304cSDavid Hildenbrand         ram_block_notify_add(new_block->host, new_block->used_length,
19348f44304cSDavid Hildenbrand                              new_block->max_length);
1935a904c911SPaolo Bonzini     }
193615f7a80cSXiaoyao Li     return;
193715f7a80cSXiaoyao Li 
193815f7a80cSXiaoyao Li out_free:
193915f7a80cSXiaoyao Li     if (free_on_error) {
194015f7a80cSXiaoyao Li         qemu_anon_ram_free(new_block->host, new_block->max_length);
194115f7a80cSXiaoyao Li         new_block->host = NULL;
194215f7a80cSXiaoyao Li     }
194394a6b54fSpbrook }
1944e9a1ab19Sbellard 
1945d5dbde46SHikaru Nishida #ifdef CONFIG_POSIX
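/*
 * Create a RAM block backed by an existing file descriptor.  The fd
 * typically refers to a regular file, a hugetlbfs file, or a devdax
 * device; its size and preferred alignment are validated against the
 * requested size and the memory region's alignment before the block
 * is mapped and registered.
 */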
19463ec02148SSteve Sistare RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size,
19473ec02148SSteve Sistare                                  qemu_ram_resize_cb resized, MemoryRegion *mr,
194844a4ff31SJagannathan Raman                                  uint32_t ram_flags, int fd, off_t offset,
19493ec02148SSteve Sistare                                  bool grow,
19505c52a219SDavid Hildenbrand                                  Error **errp)
1951e1c57ab8SPaolo Bonzini {
1952*9fb40bb9SSteve Sistare     ERRP_GUARD();
1953e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1954ef701d7bSHu Tao     Error *local_err = NULL;
1955ce317be9SJingqi Liu     int64_t file_size, file_align;
1956e1c57ab8SPaolo Bonzini 
1957a4de8552SJunyan He     /* Only these ram flags are supported for now. */
195856918a12SSean Christopherson     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
19595c52a219SDavid Hildenbrand                           RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
19603ec02148SSteve Sistare                           RAM_READONLY_FD | RAM_GUEST_MEMFD |
19613ec02148SSteve Sistare                           RAM_RESIZEABLE)) == 0);
19623ec02148SSteve Sistare     assert(max_size >= size);
1963a4de8552SJunyan He 
1964e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
19657f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
1966528f46afSFam Zheng         return NULL;
1967e1c57ab8SPaolo Bonzini     }
1968e1c57ab8SPaolo Bonzini 
1969e45e7ae2SMarc-André Lureau     if (kvm_enabled() && !kvm_has_sync_mmu()) {
1970e45e7ae2SMarc-André Lureau         error_setg(errp,
1971e45e7ae2SMarc-André Lureau                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1972e45e7ae2SMarc-André Lureau         return NULL;
1973e45e7ae2SMarc-André Lureau     }
1974e45e7ae2SMarc-André Lureau 
19759260bd40SRichard Henderson     size = TARGET_PAGE_ALIGN(size);
19769260bd40SRichard Henderson     size = REAL_HOST_PAGE_ALIGN(size);
19773ec02148SSteve Sistare     max_size = TARGET_PAGE_ALIGN(max_size);
19783ec02148SSteve Sistare     max_size = REAL_HOST_PAGE_ALIGN(max_size);
19799260bd40SRichard Henderson 
19808d37b030SMarc-André Lureau     file_size = get_file_size(fd);
19813ec02148SSteve Sistare     if (file_size && file_size < offset + max_size && !grow) {
1982719168fbSSteve Sistare         error_setg(errp, "%s backing store size 0x%" PRIx64
1983719168fbSSteve Sistare                    " is too small for 'size' option 0x" RAM_ADDR_FMT
1984719168fbSSteve Sistare                    " plus 'offset' option 0x%" PRIx64,
19853ec02148SSteve Sistare                    memory_region_name(mr), file_size, max_size,
1986719168fbSSteve Sistare                    (uint64_t)offset);
19878d37b030SMarc-André Lureau         return NULL;
19888d37b030SMarc-André Lureau     }
19898d37b030SMarc-André Lureau 
1990ce317be9SJingqi Liu     file_align = get_file_align(fd);
19918f1bdb0eSPeter Maydell     if (file_align > 0 && file_align > mr->align) {
1992ce317be9SJingqi Liu         error_setg(errp, "backing store align 0x%" PRIx64
19935f509751SJingqi Liu                    " is larger than 'align' option 0x%" PRIx64,
1994ce317be9SJingqi Liu                    file_align, mr->align);
1995ce317be9SJingqi Liu         return NULL;
1996ce317be9SJingqi Liu     }
1997ce317be9SJingqi Liu 
1998e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1999e1c57ab8SPaolo Bonzini     new_block->mr = mr;
20009b8424d5SMichael S. Tsirkin     new_block->used_length = size;
20013ec02148SSteve Sistare     new_block->max_length = max_size;
20023ec02148SSteve Sistare     new_block->resized = resized;
2003cbfc0171SJunyan He     new_block->flags = ram_flags;
200415f7a80cSXiaoyao Li     new_block->guest_memfd = -1;
20053ec02148SSteve Sistare     new_block->host = file_ram_alloc(new_block, max_size, fd,
20063ec02148SSteve Sistare                                      file_size < offset + max_size,
20073ec02148SSteve Sistare                                      offset, errp);
20087f56e740SPaolo Bonzini     if (!new_block->host) {
20097f56e740SPaolo Bonzini         g_free(new_block);
2010528f46afSFam Zheng         return NULL;
20117f56e740SPaolo Bonzini     }
20127f56e740SPaolo Bonzini 
20137ce18ca0SDavid Hildenbrand     ram_block_add(new_block, &local_err);
2014ef701d7bSHu Tao     if (local_err) {
2015ef701d7bSHu Tao         g_free(new_block);
2016ef701d7bSHu Tao         error_propagate(errp, local_err);
2017528f46afSFam Zheng         return NULL;
2018ef701d7bSHu Tao     }
2019528f46afSFam Zheng     return new_block;
202038b3362dSMarc-André Lureau 
202138b3362dSMarc-André Lureau }
202238b3362dSMarc-André Lureau 
202338b3362dSMarc-André Lureau 
202438b3362dSMarc-André Lureau RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2025cbfc0171SJunyan He                                    uint32_t ram_flags, const char *mem_path,
20265c52a219SDavid Hildenbrand                                    off_t offset, Error **errp)
202738b3362dSMarc-André Lureau {
202838b3362dSMarc-André Lureau     int fd;
202938b3362dSMarc-André Lureau     bool created;
203038b3362dSMarc-André Lureau     RAMBlock *block;
203138b3362dSMarc-André Lureau 
20325c52a219SDavid Hildenbrand     fd = file_ram_open(mem_path, memory_region_name(mr),
20334d6b23f7SDavid Hildenbrand                        !!(ram_flags & RAM_READONLY_FD), &created);
203438b3362dSMarc-André Lureau     if (fd < 0) {
20354d6b23f7SDavid Hildenbrand         error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM",
20364d6b23f7SDavid Hildenbrand                          mem_path);
20376da4b1c2SDavid Hildenbrand         if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) &&
20386da4b1c2SDavid Hildenbrand             fd == -EACCES) {
20396da4b1c2SDavid Hildenbrand             /*
20406da4b1c2SDavid Hildenbrand              * If we can open the file R/O (note: will never create a new file)
20416da4b1c2SDavid Hildenbrand              * and we are dealing with a private mapping, there are still ways
20426da4b1c2SDavid Hildenbrand              * to consume such files and get RAM instead of ROM.
20436da4b1c2SDavid Hildenbrand              */
20446da4b1c2SDavid Hildenbrand             fd = file_ram_open(mem_path, memory_region_name(mr), true,
20456da4b1c2SDavid Hildenbrand                                &created);
20466da4b1c2SDavid Hildenbrand             if (fd < 0) {
20476da4b1c2SDavid Hildenbrand                 return NULL;
20486da4b1c2SDavid Hildenbrand             }
20496da4b1c2SDavid Hildenbrand             assert(!created);
20506da4b1c2SDavid Hildenbrand             close(fd);
20516da4b1c2SDavid Hildenbrand             error_append_hint(errp, "Consider opening the backing store"
20526da4b1c2SDavid Hildenbrand                 " read-only but still creating writable RAM using"
20536da4b1c2SDavid Hildenbrand                 " '-object memory-backend-file,readonly=on,rom=off...'"
20546da4b1c2SDavid Hildenbrand                 " (see \"VM templating\" documentation)\n");
20556da4b1c2SDavid Hildenbrand         }
205638b3362dSMarc-André Lureau         return NULL;
205738b3362dSMarc-André Lureau     }
205838b3362dSMarc-André Lureau 
20593ec02148SSteve Sistare     block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset,
20603ec02148SSteve Sistare                                    false, errp);
206138b3362dSMarc-André Lureau     if (!block) {
206238b3362dSMarc-André Lureau         if (created) {
206338b3362dSMarc-André Lureau             unlink(mem_path);
206438b3362dSMarc-André Lureau         }
206538b3362dSMarc-André Lureau         close(fd);
206638b3362dSMarc-André Lureau         return NULL;
206738b3362dSMarc-André Lureau     }
206838b3362dSMarc-André Lureau 
206938b3362dSMarc-André Lureau     return block;
2070e1c57ab8SPaolo Bonzini }
20710b183fc8SPaolo Bonzini #endif
2072e1c57ab8SPaolo Bonzini 
2073*9fb40bb9SSteve Sistare #ifdef CONFIG_POSIX
2074*9fb40bb9SSteve Sistare /*
2075*9fb40bb9SSteve Sistare  * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so the memory
2076*9fb40bb9SSteve Sistare  * can be shared with another process when CPR is in use.  Use memfd if
2077*9fb40bb9SSteve Sistare  * available because it has no size limits; otherwise use POSIX shm.
2078*9fb40bb9SSteve Sistare  */
2079*9fb40bb9SSteve Sistare static int qemu_ram_get_shared_fd(const char *name, Error **errp)
2080*9fb40bb9SSteve Sistare {
2081*9fb40bb9SSteve Sistare     int fd;
2082*9fb40bb9SSteve Sistare 
2083*9fb40bb9SSteve Sistare     if (qemu_memfd_check(0)) {
2084*9fb40bb9SSteve Sistare         fd = qemu_memfd_create(name, 0, 0, 0, 0, errp);
2085*9fb40bb9SSteve Sistare     } else {
2086*9fb40bb9SSteve Sistare         fd = qemu_shm_alloc(0, errp);
2087*9fb40bb9SSteve Sistare     }
2088*9fb40bb9SSteve Sistare     return fd;
2089*9fb40bb9SSteve Sistare }
2090*9fb40bb9SSteve Sistare #endif
2091*9fb40bb9SSteve Sistare 
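/*
 * A standalone sketch of the memfd-or-shm strategy in
 * qemu_ram_get_shared_fd(), using raw POSIX/Linux calls instead of QEMU's
 * qemu_memfd_create()/qemu_shm_alloc() helpers; the fixed "/demo-ram" shm
 * name and the demo_* names are hypothetical.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int demo_shared_fd(const char *name, off_t size)
{
    int fd = memfd_create(name, MFD_CLOEXEC);   /* Linux 3.17+, no size cap */

    if (fd < 0) {
        /* Fall back to POSIX shm; unlinking keeps the fd anonymous-like. */
        fd = shm_open("/demo-ram", O_CREAT | O_EXCL | O_RDWR, 0600);
        if (fd < 0) {
            return -1;
        }
        shm_unlink("/demo-ram");
    }
    if (ftruncate(fd, size) < 0) {              /* size the backing object */
        close(fd);
        return -1;
    }
    return fd;          /* ready for mmap(MAP_SHARED) or passing to a peer */
}
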
209262be4e3aSMichael S. Tsirkin static
2093528f46afSFam Zheng RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
20943ec02148SSteve Sistare                                   qemu_ram_resize_cb resized,
2095ebef62d0SDavid Hildenbrand                                   void *host, uint32_t ram_flags,
2096ef701d7bSHu Tao                                   MemoryRegion *mr, Error **errp)
2097e1c57ab8SPaolo Bonzini {
2098e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
2099ef701d7bSHu Tao     Error *local_err = NULL;
21009260bd40SRichard Henderson     int align;
2101e1c57ab8SPaolo Bonzini 
21028dbe22c6SDavid Hildenbrand     assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
210315f7a80cSXiaoyao Li                           RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2104ebef62d0SDavid Hildenbrand     assert(!host ^ (ram_flags & RAM_PREALLOC));
2105*9fb40bb9SSteve Sistare     assert(max_size >= size);
2106*9fb40bb9SSteve Sistare 
2107*9fb40bb9SSteve Sistare #ifdef CONFIG_POSIX         /* ignore RAM_SHARED for Windows */
2108*9fb40bb9SSteve Sistare     if (!host) {
2109*9fb40bb9SSteve Sistare         if (ram_flags & RAM_SHARED) {
2110*9fb40bb9SSteve Sistare             const char *name = memory_region_name(mr);
2111*9fb40bb9SSteve Sistare             int fd = qemu_ram_get_shared_fd(name, errp);
2112*9fb40bb9SSteve Sistare 
2113*9fb40bb9SSteve Sistare             if (fd < 0) {
2114*9fb40bb9SSteve Sistare                 return NULL;
2115*9fb40bb9SSteve Sistare             }
2116*9fb40bb9SSteve Sistare 
2117*9fb40bb9SSteve Sistare             /* Use same alignment as qemu_anon_ram_alloc */
2118*9fb40bb9SSteve Sistare             mr->align = QEMU_VMALLOC_ALIGN;
2119*9fb40bb9SSteve Sistare 
2120*9fb40bb9SSteve Sistare             /*
2121*9fb40bb9SSteve Sistare          * This can fail if the shm mount size is too small, or allocation
2122*9fb40bb9SSteve Sistare          * from the fd is not supported, but previous QEMU versions that called
2123*9fb40bb9SSteve Sistare              * qemu_anon_ram_alloc for anonymous shared memory could have
2124*9fb40bb9SSteve Sistare              * succeeded.  Quietly fail and fall back.
2125*9fb40bb9SSteve Sistare              */
2126*9fb40bb9SSteve Sistare             new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr,
2127*9fb40bb9SSteve Sistare                                                ram_flags, fd, 0, false, NULL);
2128*9fb40bb9SSteve Sistare             if (new_block) {
2129*9fb40bb9SSteve Sistare                 trace_qemu_ram_alloc_shared(name, new_block->used_length,
2130*9fb40bb9SSteve Sistare                                             new_block->max_length, fd,
2131*9fb40bb9SSteve Sistare                                             new_block->host);
2132*9fb40bb9SSteve Sistare                 return new_block;
2133*9fb40bb9SSteve Sistare             }
2134*9fb40bb9SSteve Sistare 
2135*9fb40bb9SSteve Sistare             close(fd);
2136*9fb40bb9SSteve Sistare             /* fall back to anon allocation */
2137*9fb40bb9SSteve Sistare         }
2138*9fb40bb9SSteve Sistare     }
2139*9fb40bb9SSteve Sistare #endif
2140ebef62d0SDavid Hildenbrand 
21419260bd40SRichard Henderson     align = qemu_real_host_page_size();
21429260bd40SRichard Henderson     align = MAX(align, TARGET_PAGE_SIZE);
21439260bd40SRichard Henderson     size = ROUND_UP(size, align);
21449260bd40SRichard Henderson     max_size = ROUND_UP(max_size, align);
21459260bd40SRichard Henderson 
2146e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
2147e1c57ab8SPaolo Bonzini     new_block->mr = mr;
214862be4e3aSMichael S. Tsirkin     new_block->resized = resized;
21499b8424d5SMichael S. Tsirkin     new_block->used_length = size;
21509b8424d5SMichael S. Tsirkin     new_block->max_length = max_size;
2151e1c57ab8SPaolo Bonzini     new_block->fd = -1;
215215f7a80cSXiaoyao Li     new_block->guest_memfd = -1;
21538e3b0cbbSMarc-André Lureau     new_block->page_size = qemu_real_host_page_size();
2154e1c57ab8SPaolo Bonzini     new_block->host = host;
2155ebef62d0SDavid Hildenbrand     new_block->flags = ram_flags;
21567ce18ca0SDavid Hildenbrand     ram_block_add(new_block, &local_err);
2157ef701d7bSHu Tao     if (local_err) {
2158ef701d7bSHu Tao         g_free(new_block);
2159ef701d7bSHu Tao         error_propagate(errp, local_err);
2160528f46afSFam Zheng         return NULL;
2161ef701d7bSHu Tao     }
2162528f46afSFam Zheng     return new_block;
2163e1c57ab8SPaolo Bonzini }
2164e1c57ab8SPaolo Bonzini 
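/*
 * The quiet-fallback path above can be sketched with plain mmap(): prefer an
 * fd-backed MAP_SHARED mapping, but degrade silently to anonymous shared
 * memory when that fails (the NULL errp passed to qemu_ram_alloc_from_fd()
 * above suppresses the error the same way).  demo_alloc_shared() is a
 * hypothetical name.
 */
#include <sys/mman.h>
#include <stddef.h>

static void *demo_alloc_shared(int fd, size_t size)
{
    void *p = MAP_FAILED;

    if (fd >= 0) {
        p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }
    if (p == MAP_FAILED) {
        /* quiet fallback: anonymous memory, shareable only with children */
        p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    }
    return p == MAP_FAILED ? NULL : p;
}
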
2165528f46afSFam Zheng RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
216662be4e3aSMichael S. Tsirkin                                    MemoryRegion *mr, Error **errp)
216762be4e3aSMichael S. Tsirkin {
2168ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr,
2169ebef62d0SDavid Hildenbrand                                    errp);
217062be4e3aSMichael S. Tsirkin }
217162be4e3aSMichael S. Tsirkin 
2172ebef62d0SDavid Hildenbrand RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
217306329cceSMarcel Apfelbaum                          MemoryRegion *mr, Error **errp)
21746977dfe6SYoshiaki Tamura {
217515f7a80cSXiaoyao Li     assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2176ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
217762be4e3aSMichael S. Tsirkin }
217862be4e3aSMichael S. Tsirkin 
2179528f46afSFam Zheng RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
21803ec02148SSteve Sistare                                     qemu_ram_resize_cb resized,
218162be4e3aSMichael S. Tsirkin                                     MemoryRegion *mr, Error **errp)
218262be4e3aSMichael S. Tsirkin {
2183ebef62d0SDavid Hildenbrand     return qemu_ram_alloc_internal(size, maxsz, resized, NULL,
2184ebef62d0SDavid Hildenbrand                                    RAM_RESIZEABLE, mr, errp);
21856977dfe6SYoshiaki Tamura }
21866977dfe6SYoshiaki Tamura 
218743771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block)
2188e9a1ab19Sbellard {
21897bd4f430SPaolo Bonzini     if (block->flags & RAM_PREALLOC) {
2190cd19cfa2SHuang Ying         ;
2191dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
2192dfeaf2abSMarkus Armbruster         xen_invalidate_map_cache_entry(block->host);
2193089f3f76SStefan Weil #ifndef _WIN32
21943435f395SMarkus Armbruster     } else if (block->fd >= 0) {
219553adb9d4SMurilo Opsfelder Araujo         qemu_ram_munmap(block->fd, block->host, block->max_length);
219604b16653SAlex Williamson         close(block->fd);
2197089f3f76SStefan Weil #endif
219804b16653SAlex Williamson     } else {
21999b8424d5SMichael S. Tsirkin         qemu_anon_ram_free(block->host, block->max_length);
220004b16653SAlex Williamson     }
220115f7a80cSXiaoyao Li 
220215f7a80cSXiaoyao Li     if (block->guest_memfd >= 0) {
220315f7a80cSXiaoyao Li         close(block->guest_memfd);
2204852f0048SPaolo Bonzini         ram_block_discard_require(false);
220515f7a80cSXiaoyao Li     }
220615f7a80cSXiaoyao Li 
22077267c094SAnthony Liguori     g_free(block);
220843771539SPaolo Bonzini }
220943771539SPaolo Bonzini 
2210f1060c55SFam Zheng void qemu_ram_free(RAMBlock *block)
221143771539SPaolo Bonzini {
221285bc2a15SMarc-André Lureau     if (!block) {
221385bc2a15SMarc-André Lureau         return;
221485bc2a15SMarc-André Lureau     }
221585bc2a15SMarc-André Lureau 
22160987d735SPaolo Bonzini     if (block->host) {
22178f44304cSDavid Hildenbrand         ram_block_notify_remove(block->host, block->used_length,
22188f44304cSDavid Hildenbrand                                 block->max_length);
22190987d735SPaolo Bonzini     }
22200987d735SPaolo Bonzini 
222143771539SPaolo Bonzini     qemu_mutex_lock_ramlist();
22220dc3f44aSMike Day     QLIST_REMOVE_RCU(block, next);
222343771539SPaolo Bonzini     ram_list.mru_block = NULL;
22240dc3f44aSMike Day     /* Write list before version */
22250dc3f44aSMike Day     smp_wmb();
222643771539SPaolo Bonzini     ram_list.version++;
222743771539SPaolo Bonzini     call_rcu(block, reclaim_ramblock, rcu);
2228b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
2229e9a1ab19Sbellard }
2230e9a1ab19Sbellard 
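/*
 * The smp_wmb()-before-version++ pairing above is a release/acquire
 * publication.  A minimal C11 sketch (demo_* names hypothetical): a reader
 * that observes the new version is guaranteed to also observe the list
 * update that preceded it.
 */
#include <stdatomic.h>

static _Atomic(void *) demo_list_head;
static _Atomic unsigned demo_list_version;

static void demo_publish(void *new_head)
{
    atomic_store_explicit(&demo_list_head, new_head, memory_order_relaxed);
    /* release: orders the head store before the version bump */
    atomic_fetch_add_explicit(&demo_list_version, 1, memory_order_release);
}

static void *demo_snapshot(unsigned *version)
{
    /* acquire: pairs with the release above */
    *version = atomic_load_explicit(&demo_list_version, memory_order_acquire);
    return atomic_load_explicit(&demo_list_head, memory_order_relaxed);
}
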
2231cd19cfa2SHuang Ying #ifndef _WIN32
2232cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2233cd19cfa2SHuang Ying {
2234cd19cfa2SHuang Ying     RAMBlock *block;
2235cd19cfa2SHuang Ying     ram_addr_t offset;
2236cd19cfa2SHuang Ying     int flags;
2237cd19cfa2SHuang Ying     void *area, *vaddr;
22389e6b9f37SDavid Hildenbrand     int prot;
2239cd19cfa2SHuang Ying 
224099e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2241cd19cfa2SHuang Ying         offset = addr - block->offset;
22429b8424d5SMichael S. Tsirkin         if (offset < block->max_length) {
22431240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
22447bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
2245cd19cfa2SHuang Ying                 ;
2246dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
2247dfeaf2abSMarkus Armbruster                 abort();
2248cd19cfa2SHuang Ying             } else {
2249cd19cfa2SHuang Ying                 flags = MAP_FIXED;
2250dbb92eeaSDavid Hildenbrand                 flags |= block->flags & RAM_SHARED ?
2251dbb92eeaSDavid Hildenbrand                          MAP_SHARED : MAP_PRIVATE;
2252d94e0bc9SDavid Hildenbrand                 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
22539e6b9f37SDavid Hildenbrand                 prot = PROT_READ;
22549e6b9f37SDavid Hildenbrand                 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
22553435f395SMarkus Armbruster                 if (block->fd >= 0) {
22569e6b9f37SDavid Hildenbrand                     area = mmap(vaddr, length, prot, flags, block->fd,
22579e6b9f37SDavid Hildenbrand                                 offset + block->fd_offset);
2258cd19cfa2SHuang Ying                 } else {
2259dbb92eeaSDavid Hildenbrand                     flags |= MAP_ANONYMOUS;
22609e6b9f37SDavid Hildenbrand                     area = mmap(vaddr, length, prot, flags, -1, 0);
2261cd19cfa2SHuang Ying                 }
2262cd19cfa2SHuang Ying                 if (area != vaddr) {
2263493d89bfSAlistair Francis                     error_report("Could not remap addr: "
2264493d89bfSAlistair Francis                                  RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
2265cd19cfa2SHuang Ying                                  length, addr);
2266cd19cfa2SHuang Ying                     exit(1);
2267cd19cfa2SHuang Ying                 }
22688490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
2269ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
2270cd19cfa2SHuang Ying             }
2271cd19cfa2SHuang Ying         }
2272cd19cfa2SHuang Ying     }
2273cd19cfa2SHuang Ying }
2274cd19cfa2SHuang Ying #endif /* !_WIN32 */
2275cd19cfa2SHuang Ying 
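/*
 * A minimal sketch of the MAP_FIXED trick used by qemu_ram_remap(): map
 * fresh memory over a (possibly poisoned) range at the same virtual
 * address, so existing pointers into the block stay valid.  This covers
 * only the anonymous private case; the code above also re-applies
 * MAP_SHARED/MAP_NORESERVE, read-only protection and the fd-backed case.
 */
#include <sys/mman.h>
#include <stddef.h>

static int demo_remap_in_place(void *vaddr, size_t length)
{
    void *p = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    return p == vaddr ? 0 : -1;     /* MAP_FIXED succeeds at vaddr or fails */
}
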
2276a99dd337SJuergen Gross /*
2277a99dd337SJuergen Gross  * Return a host pointer to guest's ram.
22785a5585f4SEdgar E. Iglesias  * For Xen, foreign mappings get created if they don't already exist.
22790dc3f44aSMike Day  *
22805a5585f4SEdgar E. Iglesias  * @block: block for the RAM to look up (optional and may be NULL).
22815a5585f4SEdgar E. Iglesias  * @addr: address within the memory region.
22825a5585f4SEdgar E. Iglesias  * @size: pointer to requested size (optional and may be NULL).
22835a5585f4SEdgar E. Iglesias  *        size may get modified and return a value smaller than
22845a5585f4SEdgar E. Iglesias  *        what was requested.
22855a5585f4SEdgar E. Iglesias  * @lock: whether to lock the mapping in xen-mapcache until invalidated.
22865a5585f4SEdgar E. Iglesias  * @is_write: hint whether to map RW or RO in the xen-mapcache.
22875a5585f4SEdgar E. Iglesias  *            (optional and may always be set to true).
22880dc3f44aSMike Day  *
2289e81bcda5SPaolo Bonzini  * Called within RCU critical section.
2290ae3a7047SMike Day  */
2291aab4631aSManos Pitsidianakis static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
22925a5585f4SEdgar E. Iglesias                                  hwaddr *size, bool lock,
22935a5585f4SEdgar E. Iglesias                                  bool is_write)
229438bee5dcSStefano Stabellini {
2295a99dd337SJuergen Gross     hwaddr len = 0;
2296a99dd337SJuergen Gross 
2297a99dd337SJuergen Gross     if (size && *size == 0) {
22988ab934f9SStefano Stabellini         return NULL;
22998ab934f9SStefano Stabellini     }
2300e81bcda5SPaolo Bonzini 
23013655cb9cSGonglei     if (block == NULL) {
2302e81bcda5SPaolo Bonzini         block = qemu_get_ram_block(addr);
23030878d0e1SPaolo Bonzini         addr -= block->offset;
23043655cb9cSGonglei     }
2305a99dd337SJuergen Gross     if (size) {
23060878d0e1SPaolo Bonzini         *size = MIN(*size, block->max_length - addr);
2307a99dd337SJuergen Gross         len = *size;
2308a99dd337SJuergen Gross     }
2309e81bcda5SPaolo Bonzini 
2310e81bcda5SPaolo Bonzini     if (xen_enabled() && block->host == NULL) {
2311e81bcda5SPaolo Bonzini         /* We need to check whether the requested address is in RAM
2312e81bcda5SPaolo Bonzini          * because we don't want to map the entire guest memory in QEMU.
2313e81bcda5SPaolo Bonzini          * In that case, just map the requested area.
2314e81bcda5SPaolo Bonzini          */
2315a5bdc451SEdgar E. Iglesias         if (xen_mr_is_memory(block->mr)) {
23165d1c2602SEdgar E. Iglesias             return xen_map_cache(block->mr, block->offset + addr,
231749a72029SEdgar E. Iglesias                                  len, block->offset,
231849a72029SEdgar E. Iglesias                                  lock, lock, is_write);
231938bee5dcSStefano Stabellini         }
232038bee5dcSStefano Stabellini 
23215a5585f4SEdgar E. Iglesias         block->host = xen_map_cache(block->mr, block->offset,
232249a72029SEdgar E. Iglesias                                     block->max_length,
232349a72029SEdgar E. Iglesias                                     block->offset,
232449a72029SEdgar E. Iglesias                                     1, lock, is_write);
232538bee5dcSStefano Stabellini     }
2326e81bcda5SPaolo Bonzini 
23270878d0e1SPaolo Bonzini     return ramblock_ptr(block, addr);
232838bee5dcSStefano Stabellini }
232938bee5dcSStefano Stabellini 
2330a99dd337SJuergen Gross /*
2331a99dd337SJuergen Gross  * Return a host pointer to ram allocated with qemu_ram_alloc.
2332a99dd337SJuergen Gross  * This should not be used for general purpose DMA.  Use address_space_map
2333a99dd337SJuergen Gross  * or address_space_rw instead. For local memory (e.g. video ram) that the
2334a99dd337SJuergen Gross  * device owns, use memory_region_get_ram_ptr.
2335a99dd337SJuergen Gross  *
2336a99dd337SJuergen Gross  * Called within RCU critical section.
2337a99dd337SJuergen Gross  */
2338a99dd337SJuergen Gross void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2339a99dd337SJuergen Gross {
23405a5585f4SEdgar E. Iglesias     return qemu_ram_ptr_length(ram_block, addr, NULL, false, true);
2341a99dd337SJuergen Gross }
2342a99dd337SJuergen Gross 
2343f90bb71bSDr. David Alan Gilbert /* Return the offset of a host pointer within a RAMBlock */
2344f90bb71bSDr. David Alan Gilbert ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2345f90bb71bSDr. David Alan Gilbert {
2346f90bb71bSDr. David Alan Gilbert     ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2347f90bb71bSDr. David Alan Gilbert     assert((uintptr_t)host >= (uintptr_t)rb->host);
2348f90bb71bSDr. David Alan Gilbert     assert(res < rb->max_length);
2349f90bb71bSDr. David Alan Gilbert 
2350f90bb71bSDr. David Alan Gilbert     return res;
2351f90bb71bSDr. David Alan Gilbert }
2352f90bb71bSDr. David Alan Gilbert 
2353422148d3SDr. David Alan Gilbert RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2354422148d3SDr. David Alan Gilbert                                    ram_addr_t *offset)
23555579c7f3Spbrook {
235694a6b54fSpbrook     RAMBlock *block;
235794a6b54fSpbrook     uint8_t *host = ptr;
235894a6b54fSpbrook 
2359868bb33fSJan Kiszka     if (xen_enabled()) {
2360f615f396SPaolo Bonzini         ram_addr_t ram_addr;
2361694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
2362f615f396SPaolo Bonzini         ram_addr = xen_ram_addr_from_mapcache(ptr);
2363596ccccdSEdgar E. Iglesias         if (ram_addr == RAM_ADDR_INVALID) {
2364596ccccdSEdgar E. Iglesias             return NULL;
2365596ccccdSEdgar E. Iglesias         }
2366596ccccdSEdgar E. Iglesias 
2367f615f396SPaolo Bonzini         block = qemu_get_ram_block(ram_addr);
2368422148d3SDr. David Alan Gilbert         if (block) {
2369d6b6aec4SAnthony PERARD             *offset = ram_addr - block->offset;
2370422148d3SDr. David Alan Gilbert         }
2371422148d3SDr. David Alan Gilbert         return block;
2372712c2b41SStefano Stabellini     }
2373712c2b41SStefano Stabellini 
2374694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
2375d73415a3SStefan Hajnoczi     block = qatomic_rcu_read(&ram_list.mru_block);
23769b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
237723887b79SPaolo Bonzini         goto found;
237823887b79SPaolo Bonzini     }
237923887b79SPaolo Bonzini 
238099e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2381432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
2382432d268cSJun Nakajima         if (block->host == NULL) {
2383432d268cSJun Nakajima             continue;
2384432d268cSJun Nakajima         }
23859b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
238623887b79SPaolo Bonzini             goto found;
238794a6b54fSpbrook         }
2388f471a17eSAlex Williamson     }
2389432d268cSJun Nakajima 
23901b5ec234SPaolo Bonzini     return NULL;
239123887b79SPaolo Bonzini 
239223887b79SPaolo Bonzini found:
2393422148d3SDr. David Alan Gilbert     *offset = (host - block->host);
2394422148d3SDr. David Alan Gilbert     if (round_offset) {
2395422148d3SDr. David Alan Gilbert         *offset &= TARGET_PAGE_MASK;
2396422148d3SDr. David Alan Gilbert     }
2397422148d3SDr. David Alan Gilbert     return block;
2398422148d3SDr. David Alan Gilbert }
2399422148d3SDr. David Alan Gilbert 
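/*
 * qemu_ram_block_from_host() is an MRU-cached linear search: most lookups
 * hit the same block as the previous one, so a single cached pointer
 * short-circuits the list walk.  A reduced sketch without RCU (demo_*
 * names hypothetical):
 */
#include <stddef.h>

struct demo_ramblock {
    unsigned char *host;
    size_t max_length;
    struct demo_ramblock *next;
};

static struct demo_ramblock *demo_ramblocks, *demo_mru;

static struct demo_ramblock *demo_block_from_host(unsigned char *host)
{
    struct demo_ramblock *b = demo_mru;

    /* fast path: the unsigned compare also rejects host < b->host */
    if (b && b->host && (size_t)(host - b->host) < b->max_length) {
        return b;
    }
    for (b = demo_ramblocks; b; b = b->next) {
        if (b->host && (size_t)(host - b->host) < b->max_length) {
            demo_mru = b;           /* remember for the next lookup */
            return b;
        }
    }
    return NULL;
}
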
2400e3dd7493SDr. David Alan Gilbert /*
2401e3dd7493SDr. David Alan Gilbert  * Finds the named RAMBlock
2402e3dd7493SDr. David Alan Gilbert  *
2403e3dd7493SDr. David Alan Gilbert  * name: The name of the RAMBlock to find
2404e3dd7493SDr. David Alan Gilbert  *
2405e3dd7493SDr. David Alan Gilbert  * Returns: RAMBlock (or NULL if not found)
2406e3dd7493SDr. David Alan Gilbert  */
2407e3dd7493SDr. David Alan Gilbert RAMBlock *qemu_ram_block_by_name(const char *name)
2408e3dd7493SDr. David Alan Gilbert {
2409e3dd7493SDr. David Alan Gilbert     RAMBlock *block;
2410e3dd7493SDr. David Alan Gilbert 
241199e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
2412e3dd7493SDr. David Alan Gilbert         if (!strcmp(name, block->idstr)) {
2413e3dd7493SDr. David Alan Gilbert             return block;
2414e3dd7493SDr. David Alan Gilbert         }
2415e3dd7493SDr. David Alan Gilbert     }
2416e3dd7493SDr. David Alan Gilbert 
2417e3dd7493SDr. David Alan Gilbert     return NULL;
2418e3dd7493SDr. David Alan Gilbert }
2419e3dd7493SDr. David Alan Gilbert 
24208d7f2e76SPhilippe Mathieu-Daudé /*
24218d7f2e76SPhilippe Mathieu-Daudé  * Some of the system routines need to translate from a host pointer
24228d7f2e76SPhilippe Mathieu-Daudé  * (typically a TLB entry) back to a ram offset.
24238d7f2e76SPhilippe Mathieu-Daudé  */
242407bdaa41SPaolo Bonzini ram_addr_t qemu_ram_addr_from_host(void *ptr)
2425422148d3SDr. David Alan Gilbert {
2426422148d3SDr. David Alan Gilbert     RAMBlock *block;
2427f615f396SPaolo Bonzini     ram_addr_t offset;
2428422148d3SDr. David Alan Gilbert 
2429f615f396SPaolo Bonzini     block = qemu_ram_block_from_host(ptr, false, &offset);
2430422148d3SDr. David Alan Gilbert     if (!block) {
243107bdaa41SPaolo Bonzini         return RAM_ADDR_INVALID;
2432422148d3SDr. David Alan Gilbert     }
2433422148d3SDr. David Alan Gilbert 
243407bdaa41SPaolo Bonzini     return block->offset + offset;
2435e890261fSMarcelo Tosatti }
2436f471a17eSAlex Williamson 
243797e03465SRichard Henderson ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
243897e03465SRichard Henderson {
243997e03465SRichard Henderson     ram_addr_t ram_addr;
244097e03465SRichard Henderson 
244197e03465SRichard Henderson     ram_addr = qemu_ram_addr_from_host(ptr);
244297e03465SRichard Henderson     if (ram_addr == RAM_ADDR_INVALID) {
244397e03465SRichard Henderson         error_report("Bad ram pointer %p", ptr);
244497e03465SRichard Henderson         abort();
244597e03465SRichard Henderson     }
244697e03465SRichard Henderson     return ram_addr;
244797e03465SRichard Henderson }
244897e03465SRichard Henderson 
2449b2a44fcaSPaolo Bonzini static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2450a152be43SPhilippe Mathieu-Daudé                                  MemTxAttrs attrs, void *buf, hwaddr len);
245116620684SAlexey Kardashevskiy static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2452a152be43SPhilippe Mathieu-Daudé                                   const void *buf, hwaddr len);
24530c249ff7SLi Zhijian static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
2454eace72b7SPeter Maydell                                   bool is_write, MemTxAttrs attrs);
245516620684SAlexey Kardashevskiy 
2456f25a49e0SPeter Maydell static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2457f25a49e0SPeter Maydell                                 unsigned len, MemTxAttrs attrs)
2458db7b5426Sblueswir1 {
2459acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2460ff6cff75SPaolo Bonzini     uint8_t buf[8];
24615c9eb028SPeter Maydell     MemTxResult res;
2462791af8c8SPaolo Bonzini 
2463db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2464883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__,
2465acc9d80bSJan Kiszka            subpage, len, addr);
2466db7b5426Sblueswir1 #endif
246716620684SAlexey Kardashevskiy     res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
24685c9eb028SPeter Maydell     if (res) {
24695c9eb028SPeter Maydell         return res;
2470f25a49e0SPeter Maydell     }
24716d3ede54SPeter Maydell     *data = ldn_p(buf, len);
2472f25a49e0SPeter Maydell     return MEMTX_OK;
2473db7b5426Sblueswir1 }
2474db7b5426Sblueswir1 
2475f25a49e0SPeter Maydell static MemTxResult subpage_write(void *opaque, hwaddr addr,
2476f25a49e0SPeter Maydell                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2477db7b5426Sblueswir1 {
2478acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2479ff6cff75SPaolo Bonzini     uint8_t buf[8];
2480acc9d80bSJan Kiszka 
2481db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2482883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx
2483acc9d80bSJan Kiszka            " value %"PRIx64"\n",
2484acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
2485db7b5426Sblueswir1 #endif
24866d3ede54SPeter Maydell     stn_p(buf, len, value);
248716620684SAlexey Kardashevskiy     return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2488db7b5426Sblueswir1 }
2489db7b5426Sblueswir1 
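/*
 * subpage_read()/subpage_write() funnel every access through a byte buffer
 * plus the ldn_p()/stn_p() size-dispatch helpers.  A host-endian sketch of
 * that dispatch (QEMU's real helpers also come in explicit little/big-endian
 * flavours; demo_* names hypothetical):
 */
#include <stdint.h>
#include <string.h>

static uint64_t demo_ldn(const void *buf, unsigned len)
{
    switch (len) {              /* len is 1, 2, 4 or 8 */
    case 1: { uint8_t  v; memcpy(&v, buf, 1); return v; }
    case 2: { uint16_t v; memcpy(&v, buf, 2); return v; }
    case 4: { uint32_t v; memcpy(&v, buf, 4); return v; }
    default:{ uint64_t v; memcpy(&v, buf, 8); return v; }
    }
}

static void demo_stn(void *buf, unsigned len, uint64_t val)
{
    switch (len) {              /* truncates to the low 'len' bytes */
    case 1: { uint8_t  v = val; memcpy(buf, &v, 1); break; }
    case 2: { uint16_t v = val; memcpy(buf, &v, 2); break; }
    case 4: { uint32_t v = val; memcpy(buf, &v, 4); break; }
    default:{ uint64_t v = val; memcpy(buf, &v, 8); break; }
    }
}
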
2490c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
24918372d383SPeter Maydell                             unsigned len, bool is_write,
24928372d383SPeter Maydell                             MemTxAttrs attrs)
2493c353e4ccSPaolo Bonzini {
2494acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
2495c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
2496883f2c59SPhilippe Mathieu-Daudé     printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n",
2497acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2498c353e4ccSPaolo Bonzini #endif
2499c353e4ccSPaolo Bonzini 
250016620684SAlexey Kardashevskiy     return flatview_access_valid(subpage->fv, addr + subpage->base,
2501eace72b7SPeter Maydell                                  len, is_write, attrs);
2502c353e4ccSPaolo Bonzini }
2503c353e4ccSPaolo Bonzini 
250470c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
2505f25a49e0SPeter Maydell     .read_with_attrs = subpage_read,
2506f25a49e0SPeter Maydell     .write_with_attrs = subpage_write,
2507ff6cff75SPaolo Bonzini     .impl.min_access_size = 1,
2508ff6cff75SPaolo Bonzini     .impl.max_access_size = 8,
2509ff6cff75SPaolo Bonzini     .valid.min_access_size = 1,
2510ff6cff75SPaolo Bonzini     .valid.max_access_size = 8,
2511c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
251270c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
2513db7b5426Sblueswir1 };
2514db7b5426Sblueswir1 
2515c227f099SAnthony Liguori static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
25165312bd8bSAvi Kivity                             uint16_t section)
2517db7b5426Sblueswir1 {
2518db7b5426Sblueswir1     int idx, eidx;
2519db7b5426Sblueswir1 
2520db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2521db7b5426Sblueswir1         return -1;
2522db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
2523db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
2524db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2525016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2526016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
2527db7b5426Sblueswir1 #endif
2528db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
25295312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
2530db7b5426Sblueswir1     }
2531db7b5426Sblueswir1 
2532db7b5426Sblueswir1     return 0;
2533db7b5426Sblueswir1 }
2534db7b5426Sblueswir1 
253516620684SAlexey Kardashevskiy static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2536db7b5426Sblueswir1 {
2537c227f099SAnthony Liguori     subpage_t *mmio;
2538db7b5426Sblueswir1 
2539b797ab1aSWei Yang     /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
25402615fabdSVijaya Kumar K     mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
254116620684SAlexey Kardashevskiy     mmio->fv = fv;
2542db7b5426Sblueswir1     mmio->base = base;
25432c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2544b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
2545b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
2546db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
2547883f2c59SPhilippe Mathieu-Daudé     printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__,
2548016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
2549db7b5426Sblueswir1 #endif
2550db7b5426Sblueswir1 
2551db7b5426Sblueswir1     return mmio;
2552db7b5426Sblueswir1 }
2553db7b5426Sblueswir1 
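/*
 * subpage_init() leans on a single zeroing allocation to create the struct
 * and initialise its trailing sub_section[] table in one go (section 0
 * being PHYS_SECTION_UNASSIGNED).  The same idiom in plain C with a
 * flexible array member (demo_* names hypothetical):
 */
#include <stdint.h>
#include <stdlib.h>

struct demo_subpage {
    uint64_t base;
    uint16_t sub_section[];         /* flexible array member */
};

static struct demo_subpage *demo_subpage_new(size_t nentries)
{
    /* calloc zeroes the table, so every entry starts as section 0 */
    return calloc(1, sizeof(struct demo_subpage)
                     + nentries * sizeof(uint16_t));
}
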
255416620684SAlexey Kardashevskiy static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
25555312bd8bSAvi Kivity {
255616620684SAlexey Kardashevskiy     assert(fv);
25575312bd8bSAvi Kivity     MemoryRegionSection section = {
255816620684SAlexey Kardashevskiy         .fv = fv,
25595312bd8bSAvi Kivity         .mr = mr,
25605312bd8bSAvi Kivity         .offset_within_address_space = 0,
25615312bd8bSAvi Kivity         .offset_within_region = 0,
2562052e87b0SPaolo Bonzini         .size = int128_2_64(),
25635312bd8bSAvi Kivity     };
25645312bd8bSAvi Kivity 
256553cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
25665312bd8bSAvi Kivity }
25675312bd8bSAvi Kivity 
25682d54f194SPeter Maydell MemoryRegionSection *iotlb_to_section(CPUState *cpu,
25692d54f194SPeter Maydell                                       hwaddr index, MemTxAttrs attrs)
2570aa102231SAvi Kivity {
2571a54c87b6SPeter Maydell     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2572a54c87b6SPeter Maydell     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
25730d58c660SRichard Henderson     AddressSpaceDispatch *d = cpuas->memory_dispatch;
257486e4f93dSRichard Henderson     int section_index = index & ~TARGET_PAGE_MASK;
257586e4f93dSRichard Henderson     MemoryRegionSection *ret;
25769d82b5a7SPaolo Bonzini 
257786e4f93dSRichard Henderson     assert(section_index < d->map.sections_nb);
257886e4f93dSRichard Henderson     ret = d->map.sections + section_index;
257986e4f93dSRichard Henderson     assert(ret->mr);
258086e4f93dSRichard Henderson     assert(ret->mr->ops);
258186e4f93dSRichard Henderson 
258286e4f93dSRichard Henderson     return ret;
2583aa102231SAvi Kivity }
2584aa102231SAvi Kivity 
2585e9179ce1SAvi Kivity static void io_mem_init(void)
2586e9179ce1SAvi Kivity {
25872c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
25881f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
2589e9179ce1SAvi Kivity }
2590e9179ce1SAvi Kivity 
25918629d3fcSAlexey Kardashevskiy AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2592ac1970fbSAvi Kivity {
259353cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
259453cb28cbSMarcel Apfelbaum     uint16_t n;
259553cb28cbSMarcel Apfelbaum 
259616620684SAlexey Kardashevskiy     n = dummy_section(&d->map, fv, &io_mem_unassigned);
259753cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
259800752703SPaolo Bonzini 
25999736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
260066a6df1dSAlexey Kardashevskiy 
260166a6df1dSAlexey Kardashevskiy     return d;
260200752703SPaolo Bonzini }
260300752703SPaolo Bonzini 
260466a6df1dSAlexey Kardashevskiy void address_space_dispatch_free(AddressSpaceDispatch *d)
260579e2b9aeSPaolo Bonzini {
260679e2b9aeSPaolo Bonzini     phys_sections_free(&d->map);
260779e2b9aeSPaolo Bonzini     g_free(d);
260879e2b9aeSPaolo Bonzini }
260979e2b9aeSPaolo Bonzini 
26109458a9a1SPaolo Bonzini static void do_nothing(CPUState *cpu, run_on_cpu_data d)
26119458a9a1SPaolo Bonzini {
26129458a9a1SPaolo Bonzini }
26139458a9a1SPaolo Bonzini 
26149458a9a1SPaolo Bonzini static void tcg_log_global_after_sync(MemoryListener *listener)
26159458a9a1SPaolo Bonzini {
26169458a9a1SPaolo Bonzini     CPUAddressSpace *cpuas;
26179458a9a1SPaolo Bonzini 
26189458a9a1SPaolo Bonzini     /* Wait for the CPU to end the current TB.  This avoids the following
26199458a9a1SPaolo Bonzini      * incorrect race:
26209458a9a1SPaolo Bonzini      *
26219458a9a1SPaolo Bonzini      *      vCPU                         migration
26229458a9a1SPaolo Bonzini      *      ----------------------       -------------------------
26239458a9a1SPaolo Bonzini      *      TLB check -> slow path
26249458a9a1SPaolo Bonzini      *        notdirty_mem_write
26259458a9a1SPaolo Bonzini      *          write to RAM
26269458a9a1SPaolo Bonzini      *          mark dirty
26279458a9a1SPaolo Bonzini      *                                   clear dirty flag
26289458a9a1SPaolo Bonzini      *      TLB check -> fast path
26299458a9a1SPaolo Bonzini      *                                   read memory
26309458a9a1SPaolo Bonzini      *        write to RAM
26319458a9a1SPaolo Bonzini      *
26329458a9a1SPaolo Bonzini      * by pushing the migration thread's memory read after the vCPU thread has
26339458a9a1SPaolo Bonzini      * written the memory.
26349458a9a1SPaolo Bonzini      */
263586cf9e15SPavel Dovgalyuk     if (replay_mode == REPLAY_MODE_NONE) {
263686cf9e15SPavel Dovgalyuk         /*
263786cf9e15SPavel Dovgalyuk          * VGA can make calls to this function while updating the screen.
263886cf9e15SPavel Dovgalyuk          * In record/replay mode this causes a deadlock, because
263986cf9e15SPavel Dovgalyuk          * run_on_cpu waits for the rr mutex. Therefore no races are possible
264086cf9e15SPavel Dovgalyuk          * in this case, and there is no need to call run_on_cpu when
2641f18d403fSGreg Kurz          * record/replay is enabled.
264286cf9e15SPavel Dovgalyuk          */
26439458a9a1SPaolo Bonzini         cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
26449458a9a1SPaolo Bonzini         run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
26459458a9a1SPaolo Bonzini     }
264686cf9e15SPavel Dovgalyuk }
26479458a9a1SPaolo Bonzini 
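/*
 * The do_nothing + run_on_cpu pair above is a thread drain: running a no-op
 * synchronously on the vCPU returns only after whatever the vCPU was
 * executing (the current TB) has finished.  A reduced pthread sketch of the
 * same idea, assuming the worker calls demo_quiescent() between work items
 * (demo_* names hypothetical):
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t demo_drain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_drain_cond = PTHREAD_COND_INITIALIZER;
static bool demo_drained;

static void demo_quiescent(void)   /* worker, between work items */
{
    pthread_mutex_lock(&demo_drain_lock);
    demo_drained = true;           /* the "do nothing" request ran */
    pthread_cond_signal(&demo_drain_cond);
    pthread_mutex_unlock(&demo_drain_lock);
}

static void demo_drain(void)       /* requester, e.g. a migration thread */
{
    pthread_mutex_lock(&demo_drain_lock);
    demo_drained = false;
    while (!demo_drained) {        /* wait for the next quiescent point */
        pthread_cond_wait(&demo_drain_cond, &demo_drain_lock);
    }
    pthread_mutex_unlock(&demo_drain_lock);
}
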
26480d58c660SRichard Henderson static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
26490d58c660SRichard Henderson {
26500d58c660SRichard Henderson     CPUAddressSpace *cpuas = data.host_ptr;
26510d58c660SRichard Henderson 
26520d58c660SRichard Henderson     cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
26530d58c660SRichard Henderson     tlb_flush(cpu);
26540d58c660SRichard Henderson }
26550d58c660SRichard Henderson 
26561d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
265750c1e149SAvi Kivity {
265832857f4dSPeter Maydell     CPUAddressSpace *cpuas;
26590d58c660SRichard Henderson     CPUState *cpu;
2660117712c3SAvi Kivity 
2661f28d0dfdSEmilio G. Cota     assert(tcg_enabled());
2662117712c3SAvi Kivity     /* Since each CPU stores ram addresses in its TLB cache, we must
2663117712c3SAvi Kivity        reset the modified entries.  */
266432857f4dSPeter Maydell     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
26650d58c660SRichard Henderson     cpu = cpuas->cpu;
26660d58c660SRichard Henderson 
26670d58c660SRichard Henderson     /*
26680d58c660SRichard Henderson      * Defer changes to as->memory_dispatch until the cpu is quiescent.
26690d58c660SRichard Henderson      * Otherwise we race between (1) other cpu threads and (2) ongoing
26700d58c660SRichard Henderson      * i/o for the current cpu thread, with data cached by mmu_lookup().
26710d58c660SRichard Henderson      *
26720d58c660SRichard Henderson      * In addition, queueing the work function will kick the cpu back to
26730d58c660SRichard Henderson      * the main loop, which will end the RCU critical section and reclaim
26740d58c660SRichard Henderson      * the memory data structures.
26750d58c660SRichard Henderson      *
26760d58c660SRichard Henderson      * That said, the listener is also called during realize, before
26770d58c660SRichard Henderson      * all of the tcg machinery for run-on is initialized: thus halt_cond.
267832857f4dSPeter Maydell      */
26790d58c660SRichard Henderson     if (cpu->halt_cond) {
26800d58c660SRichard Henderson         async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
26810d58c660SRichard Henderson     } else {
26820d58c660SRichard Henderson         tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
26830d58c660SRichard Henderson     }
268450c1e149SAvi Kivity }
268550c1e149SAvi Kivity 
268662152b8aSAvi Kivity static void memory_map_init(void)
268762152b8aSAvi Kivity {
26887267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
268903f49957SPaolo Bonzini 
269057271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
26917dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2692309cb471SAvi Kivity 
26937267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
26943bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
26953bb28b72SJan Kiszka                           65536);
26967dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
26972641689aSliguang }
269862152b8aSAvi Kivity 
269962152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
270062152b8aSAvi Kivity {
270162152b8aSAvi Kivity     return system_memory;
270262152b8aSAvi Kivity }
270362152b8aSAvi Kivity 
2704309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2705309cb471SAvi Kivity {
2706309cb471SAvi Kivity     return system_io;
2707309cb471SAvi Kivity }
2708309cb471SAvi Kivity 
2709845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2710a8170e5eSAvi Kivity                                      hwaddr length)
271151d7a9ebSAnthony PERARD {
2712845b6214SPaolo Bonzini     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
271373188068SPeter Maydell     ram_addr_t ramaddr = memory_region_get_ram_addr(mr);
271473188068SPeter Maydell 
271573188068SPeter Maydell     /* We know we're only called for RAM MemoryRegions */
271673188068SPeter Maydell     assert(ramaddr != RAM_ADDR_INVALID);
271773188068SPeter Maydell     addr += ramaddr;
27180878d0e1SPaolo Bonzini 
2719e87f7778SPaolo Bonzini     /* No early return if dirty_log_mask is or becomes 0, because
2720e87f7778SPaolo Bonzini      * cpu_physical_memory_set_dirty_range will still call
2721e87f7778SPaolo Bonzini      * xen_modified_memory.
2722e87f7778SPaolo Bonzini      */
2723e87f7778SPaolo Bonzini     if (dirty_log_mask) {
2724e87f7778SPaolo Bonzini         dirty_log_mask =
2725e87f7778SPaolo Bonzini             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2726e87f7778SPaolo Bonzini     }
2727845b6214SPaolo Bonzini     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
27285aa1ef71SPaolo Bonzini         assert(tcg_enabled());
2729e506ad6aSRichard Henderson         tb_invalidate_phys_range(addr, addr + length - 1);
2730845b6214SPaolo Bonzini         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2731845b6214SPaolo Bonzini     }
273258d2707eSPaolo Bonzini     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
273349dfcec4SPaolo Bonzini }
273451d7a9ebSAnthony PERARD 
2735047be4edSStefan Hajnoczi void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
2736047be4edSStefan Hajnoczi {
2737047be4edSStefan Hajnoczi     /*
2738047be4edSStefan Hajnoczi      * In principle this function would work on other memory region types too,
2739047be4edSStefan Hajnoczi      * but the ROM device use case is the only one where this operation is
2740047be4edSStefan Hajnoczi      * necessary.  Other memory regions should use the
2741047be4edSStefan Hajnoczi      * address_space_read/write() APIs.
2742047be4edSStefan Hajnoczi      */
2743047be4edSStefan Hajnoczi     assert(memory_region_is_romd(mr));
2744047be4edSStefan Hajnoczi 
2745047be4edSStefan Hajnoczi     invalidate_and_set_dirty(mr, addr, size);
2746047be4edSStefan Hajnoczi }
2747047be4edSStefan Hajnoczi 
27483123f93dSJagannathan Raman int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
274982f2563fSPaolo Bonzini {
2750e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
275123326164SRichard Henderson 
275223326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
275323326164SRichard Henderson        otherwise specified.  */
275423326164SRichard Henderson     if (access_size_max == 0) {
275523326164SRichard Henderson         access_size_max = 4;
275682f2563fSPaolo Bonzini     }
275723326164SRichard Henderson 
275823326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
275923326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
276023326164SRichard Henderson         unsigned align_size_max = addr & -addr;
276123326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
276223326164SRichard Henderson             access_size_max = align_size_max;
276323326164SRichard Henderson         }
276423326164SRichard Henderson     }
276523326164SRichard Henderson 
276623326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
276723326164SRichard Henderson     if (l > access_size_max) {
276823326164SRichard Henderson         l = access_size_max;
276923326164SRichard Henderson     }
27706554f5c0SPeter Maydell     l = pow2floor(l);
277123326164SRichard Henderson 
277223326164SRichard Henderson     return l;
277382f2563fSPaolo Bonzini }
277482f2563fSPaolo Bonzini 
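/*
 * Worked example for memory_access_size(): addr & -addr isolates the lowest
 * set bit of the address, i.e. its natural alignment, so an 8-byte access
 * at addr 0x1006 is capped at 2 bytes for a device without .impl.unaligned.
 * A local pow2floor for reference (QEMU's lives in qemu/host-utils.h):
 */
#include <stdint.h>

static inline uint64_t demo_pow2floor(uint64_t value)
{
    /* repeatedly clear the lowest set bit until one bit remains */
    while (value & (value - 1)) {
        value &= value - 1;
    }
    return value;                   /* demo_pow2floor(6) == 4 */
}
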
27753123f93dSJagannathan Raman bool prepare_mmio_access(MemoryRegion *mr)
2776125b3806SPaolo Bonzini {
27774840f10eSJan Kiszka     bool release_lock = false;
27784840f10eSJan Kiszka 
2779195801d7SStefan Hajnoczi     if (!bql_locked()) {
2780195801d7SStefan Hajnoczi         bql_lock();
27814840f10eSJan Kiszka         release_lock = true;
2782125b3806SPaolo Bonzini     }
27834840f10eSJan Kiszka     if (mr->flush_coalesced_mmio) {
27844840f10eSJan Kiszka         qemu_flush_coalesced_mmio_buffer();
27854840f10eSJan Kiszka     }
27864840f10eSJan Kiszka 
27874840f10eSJan Kiszka     return release_lock;
2788125b3806SPaolo Bonzini }
2789125b3806SPaolo Bonzini 
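/*
 * prepare_mmio_access() is a "lock if not already held" pattern: the return
 * value tells the caller whether it owns the unlock.  A generic sketch with
 * a thread-local ownership flag standing in for bql_locked() (demo_* names
 * hypothetical):
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t demo_bql = PTHREAD_MUTEX_INITIALIZER;
static __thread bool demo_bql_held;

static bool demo_lock_if_needed(void)
{
    if (demo_bql_held) {
        return false;               /* already held: caller must not unlock */
    }
    pthread_mutex_lock(&demo_bql);
    demo_bql_held = true;
    return true;                    /* caller is responsible for unlocking */
}

static void demo_unlock_if(bool release)
{
    if (release) {
        demo_bql_held = false;
        pthread_mutex_unlock(&demo_bql);
    }
}
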
27903ab6fdc9SPhilippe Mathieu-Daudé /**
27913ab6fdc9SPhilippe Mathieu-Daudé  * flatview_access_allowed
27923ab6fdc9SPhilippe Mathieu-Daudé  * @mr: #MemoryRegion to be accessed
27933ab6fdc9SPhilippe Mathieu-Daudé  * @attrs: memory transaction attributes
27943ab6fdc9SPhilippe Mathieu-Daudé  * @addr: address within that memory region
27953ab6fdc9SPhilippe Mathieu-Daudé  * @len: the number of bytes to access
27963ab6fdc9SPhilippe Mathieu-Daudé  *
27973ab6fdc9SPhilippe Mathieu-Daudé  * Check if a memory transaction is allowed.
27983ab6fdc9SPhilippe Mathieu-Daudé  *
27993ab6fdc9SPhilippe Mathieu-Daudé  * Returns: true if transaction is allowed, false if denied.
28003ab6fdc9SPhilippe Mathieu-Daudé  */
28013ab6fdc9SPhilippe Mathieu-Daudé static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
28023ab6fdc9SPhilippe Mathieu-Daudé                                     hwaddr addr, hwaddr len)
28033ab6fdc9SPhilippe Mathieu-Daudé {
28043ab6fdc9SPhilippe Mathieu-Daudé     if (likely(!attrs.memory)) {
28053ab6fdc9SPhilippe Mathieu-Daudé         return true;
28063ab6fdc9SPhilippe Mathieu-Daudé     }
28073ab6fdc9SPhilippe Mathieu-Daudé     if (memory_region_is_ram(mr)) {
28083ab6fdc9SPhilippe Mathieu-Daudé         return true;
28093ab6fdc9SPhilippe Mathieu-Daudé     }
2810678bf8f2SBALATON Zoltan     qemu_log_mask(LOG_INVALID_MEM,
28113ab6fdc9SPhilippe Mathieu-Daudé                   "Invalid access to non-RAM device at "
28123ab6fdc9SPhilippe Mathieu-Daudé                   "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
28133ab6fdc9SPhilippe Mathieu-Daudé                   "region '%s'\n", addr, len, memory_region_name(mr));
28143ab6fdc9SPhilippe Mathieu-Daudé     return false;
28153ab6fdc9SPhilippe Mathieu-Daudé }
28163ab6fdc9SPhilippe Mathieu-Daudé 
2817e7927d33SJonathan Cameron static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
2818e7927d33SJonathan Cameron                                                 const uint8_t *buf,
2819e7927d33SJonathan Cameron                                                 hwaddr len, hwaddr mr_addr,
2820e7927d33SJonathan Cameron                                                 hwaddr *l, MemoryRegion *mr)
2821e7927d33SJonathan Cameron {
2822e7927d33SJonathan Cameron     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2823e7927d33SJonathan Cameron         return MEMTX_ACCESS_ERROR;
2824e7927d33SJonathan Cameron     }
2825e7927d33SJonathan Cameron 
2826e7927d33SJonathan Cameron     if (!memory_access_is_direct(mr, true)) {
2827e7927d33SJonathan Cameron         uint64_t val;
2828e7927d33SJonathan Cameron         MemTxResult result;
2829e7927d33SJonathan Cameron         bool release_lock = prepare_mmio_access(mr);
2830e7927d33SJonathan Cameron 
2831e7927d33SJonathan Cameron         *l = memory_access_size(mr, *l, mr_addr);
2832e7927d33SJonathan Cameron         /*
2833e7927d33SJonathan Cameron          * XXX: could force current_cpu to NULL to avoid
2834e7927d33SJonathan Cameron          * potential bugs
2835e7927d33SJonathan Cameron          */
2836e7927d33SJonathan Cameron 
2837e7927d33SJonathan Cameron         /*
2838e7927d33SJonathan Cameron          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2839e7927d33SJonathan Cameron          * the buffer by following ldn_he_p().
2840e7927d33SJonathan Cameron          */
2841e7927d33SJonathan Cameron #ifdef QEMU_STATIC_ANALYSIS
2842e7927d33SJonathan Cameron         assert((*l == 1 && len >= 1) ||
2843e7927d33SJonathan Cameron                (*l == 2 && len >= 2) ||
2844e7927d33SJonathan Cameron                (*l == 4 && len >= 4) ||
2845e7927d33SJonathan Cameron                (*l == 8 && len >= 8));
2846e7927d33SJonathan Cameron #endif
2847e7927d33SJonathan Cameron         val = ldn_he_p(buf, *l);
2848e7927d33SJonathan Cameron         result = memory_region_dispatch_write(mr, mr_addr, val,
2849e7927d33SJonathan Cameron                                               size_memop(*l), attrs);
2850e7927d33SJonathan Cameron         if (release_lock) {
2851e7927d33SJonathan Cameron             bql_unlock();
2852e7927d33SJonathan Cameron         }
2853e7927d33SJonathan Cameron 
2854e7927d33SJonathan Cameron         return result;
2855e7927d33SJonathan Cameron     } else {
2856e7927d33SJonathan Cameron         /* RAM case */
2857e7927d33SJonathan Cameron         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
28585a5585f4SEdgar E. Iglesias                                                false, true);
2859e7927d33SJonathan Cameron 
2860e7927d33SJonathan Cameron         memmove(ram_ptr, buf, *l);
2861e7927d33SJonathan Cameron         invalidate_and_set_dirty(mr, mr_addr, *l);
2862e7927d33SJonathan Cameron 
2863e7927d33SJonathan Cameron         return MEMTX_OK;
2864e7927d33SJonathan Cameron     }
2865e7927d33SJonathan Cameron }
2866e7927d33SJonathan Cameron 
2867a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
286816620684SAlexey Kardashevskiy static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2869a203ac70SPaolo Bonzini                                            MemTxAttrs attrs,
2870a152be43SPhilippe Mathieu-Daudé                                            const void *ptr,
28714c7c8563SJonathan Cameron                                            hwaddr len, hwaddr mr_addr,
2872a203ac70SPaolo Bonzini                                            hwaddr l, MemoryRegion *mr)
287313eb76e0Sbellard {
28743b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
2875a152be43SPhilippe Mathieu-Daudé     const uint8_t *buf = ptr;
287613eb76e0Sbellard 
2877a203ac70SPaolo Bonzini     for (;;) {
2878e7927d33SJonathan Cameron         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
2879e7927d33SJonathan Cameron                                                mr);
2880eb7eeb88SPaolo Bonzini 
2881eb7eeb88SPaolo Bonzini         len -= l;
2882eb7eeb88SPaolo Bonzini         buf += l;
2883eb7eeb88SPaolo Bonzini         addr += l;
2884a203ac70SPaolo Bonzini 
2885a203ac70SPaolo Bonzini         if (!len) {
2886a203ac70SPaolo Bonzini             break;
2887eb7eeb88SPaolo Bonzini         }
2888a203ac70SPaolo Bonzini 
2889a203ac70SPaolo Bonzini         l = len;
28904c7c8563SJonathan Cameron         mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
2891a203ac70SPaolo Bonzini     }
2892eb7eeb88SPaolo Bonzini 
2893eb7eeb88SPaolo Bonzini     return result;
2894eb7eeb88SPaolo Bonzini }
2895eb7eeb88SPaolo Bonzini 
28964c6ebbb3SPaolo Bonzini /* Called from RCU critical section.  */
289716620684SAlexey Kardashevskiy static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2898a152be43SPhilippe Mathieu-Daudé                                   const void *buf, hwaddr len)
2899eb7eeb88SPaolo Bonzini {
2900eb7eeb88SPaolo Bonzini     hwaddr l;
29014c7c8563SJonathan Cameron     hwaddr mr_addr;
2902eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2903a203ac70SPaolo Bonzini 
2904a203ac70SPaolo Bonzini     l = len;
29054c7c8563SJonathan Cameron     mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
29063ab6fdc9SPhilippe Mathieu-Daudé     if (!flatview_access_allowed(mr, attrs, addr, len)) {
29073ab6fdc9SPhilippe Mathieu-Daudé         return MEMTX_ACCESS_ERROR;
29083ab6fdc9SPhilippe Mathieu-Daudé     }
290958e74682SPhilippe Mathieu-Daudé     return flatview_write_continue(fv, addr, attrs, buf, len,
29104c7c8563SJonathan Cameron                                    mr_addr, l, mr);
2911a203ac70SPaolo Bonzini }
2912a203ac70SPaolo Bonzini 
2913e7927d33SJonathan Cameron static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
2914e7927d33SJonathan Cameron                                                hwaddr len, hwaddr mr_addr,
2915e7927d33SJonathan Cameron                                                hwaddr *l,
2916e7927d33SJonathan Cameron                                                MemoryRegion *mr)
2917e7927d33SJonathan Cameron {
2918e7927d33SJonathan Cameron     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2919e7927d33SJonathan Cameron         return MEMTX_ACCESS_ERROR;
2920e7927d33SJonathan Cameron     }
2921e7927d33SJonathan Cameron 
2922e7927d33SJonathan Cameron     if (!memory_access_is_direct(mr, false)) {
2923e7927d33SJonathan Cameron         /* I/O case */
2924e7927d33SJonathan Cameron         uint64_t val;
2925e7927d33SJonathan Cameron         MemTxResult result;
2926e7927d33SJonathan Cameron         bool release_lock = prepare_mmio_access(mr);
2927e7927d33SJonathan Cameron 
2928e7927d33SJonathan Cameron         *l = memory_access_size(mr, *l, mr_addr);
2929e7927d33SJonathan Cameron         result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l),
2930e7927d33SJonathan Cameron                                              attrs);
2931e7927d33SJonathan Cameron 
2932e7927d33SJonathan Cameron         /*
2933e7927d33SJonathan Cameron          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2934e7927d33SJonathan Cameron          * the buffer in the following stn_he_p().
2935e7927d33SJonathan Cameron          */
2936e7927d33SJonathan Cameron #ifdef QEMU_STATIC_ANALYSIS
2937e7927d33SJonathan Cameron         assert((*l == 1 && len >= 1) ||
2938e7927d33SJonathan Cameron                (*l == 2 && len >= 2) ||
2939e7927d33SJonathan Cameron                (*l == 4 && len >= 4) ||
2940e7927d33SJonathan Cameron                (*l == 8 && len >= 8));
2941e7927d33SJonathan Cameron #endif
2942e7927d33SJonathan Cameron         stn_he_p(buf, *l, val);
2943e7927d33SJonathan Cameron 
2944e7927d33SJonathan Cameron         if (release_lock) {
2945e7927d33SJonathan Cameron             bql_unlock();
2946e7927d33SJonathan Cameron         }
2947e7927d33SJonathan Cameron         return result;
2948e7927d33SJonathan Cameron     } else {
2949e7927d33SJonathan Cameron         /* RAM case */
2950e7927d33SJonathan Cameron         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
29515a5585f4SEdgar E. Iglesias                                                false, false);
2952e7927d33SJonathan Cameron 
2953e7927d33SJonathan Cameron         memcpy(buf, ram_ptr, *l);
2954e7927d33SJonathan Cameron 
2955e7927d33SJonathan Cameron         return MEMTX_OK;
2956e7927d33SJonathan Cameron     }
2957e7927d33SJonathan Cameron }
2958e7927d33SJonathan Cameron 
2959a203ac70SPaolo Bonzini /* Called within RCU critical section.  */
296016620684SAlexey Kardashevskiy MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2961a152be43SPhilippe Mathieu-Daudé                                    MemTxAttrs attrs, void *ptr,
29624c7c8563SJonathan Cameron                                    hwaddr len, hwaddr mr_addr, hwaddr l,
2963a203ac70SPaolo Bonzini                                    MemoryRegion *mr)
2964a203ac70SPaolo Bonzini {
2965a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2966a152be43SPhilippe Mathieu-Daudé     uint8_t *buf = ptr;
2967eb7eeb88SPaolo Bonzini 
29687cac7feaSAlexander Bulekov     fuzz_dma_read_cb(addr, len, mr);
2969a203ac70SPaolo Bonzini     for (;;) {
2970e7927d33SJonathan Cameron         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
29714840f10eSJan Kiszka 
297213eb76e0Sbellard         len -= l;
297313eb76e0Sbellard         buf += l;
297413eb76e0Sbellard         addr += l;
2975a203ac70SPaolo Bonzini 
2976a203ac70SPaolo Bonzini         if (!len) {
2977a203ac70SPaolo Bonzini             break;
297813eb76e0Sbellard         }
2979a203ac70SPaolo Bonzini 
2980a203ac70SPaolo Bonzini         l = len;
29814c7c8563SJonathan Cameron         mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
2982a203ac70SPaolo Bonzini     }
2983a203ac70SPaolo Bonzini 
2984a203ac70SPaolo Bonzini     return result;
2985a203ac70SPaolo Bonzini }
2986a203ac70SPaolo Bonzini 
2987b2a44fcaSPaolo Bonzini /* Called from RCU critical section.  */
2988b2a44fcaSPaolo Bonzini static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2989a152be43SPhilippe Mathieu-Daudé                                  MemTxAttrs attrs, void *buf, hwaddr len)
2990a203ac70SPaolo Bonzini {
2991a203ac70SPaolo Bonzini     hwaddr l;
29924c7c8563SJonathan Cameron     hwaddr mr_addr;
2993a203ac70SPaolo Bonzini     MemoryRegion *mr;
2994a203ac70SPaolo Bonzini 
2995a203ac70SPaolo Bonzini     l = len;
29964c7c8563SJonathan Cameron     mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
29973ab6fdc9SPhilippe Mathieu-Daudé     if (!flatview_access_allowed(mr, attrs, addr, len)) {
29983ab6fdc9SPhilippe Mathieu-Daudé         return MEMTX_ACCESS_ERROR;
29993ab6fdc9SPhilippe Mathieu-Daudé     }
3000b2a44fcaSPaolo Bonzini     return flatview_read_continue(fv, addr, attrs, buf, len,
30014c7c8563SJonathan Cameron                                   mr_addr, l, mr);
300213eb76e0Sbellard }
30038df1cd07Sbellard 
3004b2a44fcaSPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
3005daa3dda4SPhilippe Mathieu-Daudé                                     MemTxAttrs attrs, void *buf, hwaddr len)
3006b2a44fcaSPaolo Bonzini {
3007b2a44fcaSPaolo Bonzini     MemTxResult result = MEMTX_OK;
3008b2a44fcaSPaolo Bonzini     FlatView *fv;
3009b2a44fcaSPaolo Bonzini 
3010b2a44fcaSPaolo Bonzini     if (len > 0) {
3011694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
3012b2a44fcaSPaolo Bonzini         fv = address_space_to_flatview(as);
3013b2a44fcaSPaolo Bonzini         result = flatview_read(fv, addr, attrs, buf, len);
3014b2a44fcaSPaolo Bonzini     }
3015b2a44fcaSPaolo Bonzini 
3016b2a44fcaSPaolo Bonzini     return result;
3017b2a44fcaSPaolo Bonzini }
3018b2a44fcaSPaolo Bonzini 
30194c6ebbb3SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
30204c6ebbb3SPaolo Bonzini                                 MemTxAttrs attrs,
3021daa3dda4SPhilippe Mathieu-Daudé                                 const void *buf, hwaddr len)
30224c6ebbb3SPaolo Bonzini {
30234c6ebbb3SPaolo Bonzini     MemTxResult result = MEMTX_OK;
30244c6ebbb3SPaolo Bonzini     FlatView *fv;
30254c6ebbb3SPaolo Bonzini 
30264c6ebbb3SPaolo Bonzini     if (len > 0) {
3027694ea274SDr. David Alan Gilbert         RCU_READ_LOCK_GUARD();
30284c6ebbb3SPaolo Bonzini         fv = address_space_to_flatview(as);
30294c6ebbb3SPaolo Bonzini         result = flatview_write(fv, addr, attrs, buf, len);
30304c6ebbb3SPaolo Bonzini     }
30314c6ebbb3SPaolo Bonzini 
30324c6ebbb3SPaolo Bonzini     return result;
30334c6ebbb3SPaolo Bonzini }
30344c6ebbb3SPaolo Bonzini 
3035db84fd97SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
3036daa3dda4SPhilippe Mathieu-Daudé                              void *buf, hwaddr len, bool is_write)
3037db84fd97SPaolo Bonzini {
3038db84fd97SPaolo Bonzini     if (is_write) {
3039db84fd97SPaolo Bonzini         return address_space_write(as, addr, attrs, buf, len);
3040db84fd97SPaolo Bonzini     } else {
3041db84fd97SPaolo Bonzini         return address_space_read_full(as, addr, attrs, buf, len);
3042db84fd97SPaolo Bonzini     }
3043db84fd97SPaolo Bonzini }
3044db84fd97SPaolo Bonzini 
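/*
 * Illustrative sketch (not part of this file): a device model copying
 * guest memory through address_space_rw().  The address space, addresses
 * and scratch buffer are hypothetical; real callers usually pass the
 * device's own DMA address space rather than address_space_memory.
 */
static void example_guest_copy(AddressSpace *as, hwaddr src, hwaddr dst,
                               void *scratch, hwaddr len)
{
    /* Read guest memory into a host scratch buffer... */
    if (address_space_rw(as, src, MEMTXATTRS_UNSPECIFIED,
                         scratch, len, false) != MEMTX_OK) {
        return; /* decode error, access violation, ... */
    }
    /* ...and write it back to another guest-physical location. */
    address_space_rw(as, dst, MEMTXATTRS_UNSPECIFIED, scratch, len, true);
}
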
304575f01c68SPhilippe Mathieu-Daudé MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
304675f01c68SPhilippe Mathieu-Daudé                               uint8_t c, hwaddr len, MemTxAttrs attrs)
304775f01c68SPhilippe Mathieu-Daudé {
304875f01c68SPhilippe Mathieu-Daudé #define FILLBUF_SIZE 512
304975f01c68SPhilippe Mathieu-Daudé     uint8_t fillbuf[FILLBUF_SIZE];
305075f01c68SPhilippe Mathieu-Daudé     int l;
305175f01c68SPhilippe Mathieu-Daudé     MemTxResult error = MEMTX_OK;
305275f01c68SPhilippe Mathieu-Daudé 
305375f01c68SPhilippe Mathieu-Daudé     memset(fillbuf, c, FILLBUF_SIZE);
305475f01c68SPhilippe Mathieu-Daudé     while (len > 0) {
305575f01c68SPhilippe Mathieu-Daudé         l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
305675f01c68SPhilippe Mathieu-Daudé         error |= address_space_write(as, addr, attrs, fillbuf, l);
305775f01c68SPhilippe Mathieu-Daudé         len -= l;
305875f01c68SPhilippe Mathieu-Daudé         addr += l;
305975f01c68SPhilippe Mathieu-Daudé     }
306075f01c68SPhilippe Mathieu-Daudé 
306175f01c68SPhilippe Mathieu-Daudé     return error;
306275f01c68SPhilippe Mathieu-Daudé }
306375f01c68SPhilippe Mathieu-Daudé 
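/*
 * Illustrative sketch (not part of this file): scrubbing a guest buffer
 * with address_space_set().  Thanks to the chunked fill loop above, this
 * costs at most one bounded write per FILLBUF_SIZE (512) bytes.
 */
static bool example_zero_guest(AddressSpace *as, hwaddr addr, hwaddr len)
{
    return address_space_set(as, addr, 0, len,
                             MEMTXATTRS_UNSPECIFIED) == MEMTX_OK;
}
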
3064d7ef71efSPhilippe Mathieu-Daudé void cpu_physical_memory_rw(hwaddr addr, void *buf,
306528c80bfeSPhilippe Mathieu-Daudé                             hwaddr len, bool is_write)
3066ac1970fbSAvi Kivity {
30675c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
30685c9eb028SPeter Maydell                      buf, len, is_write);
3069ac1970fbSAvi Kivity }
3070ac1970fbSAvi Kivity 
3071582b55a9SAlexander Graf enum write_rom_type {
3072582b55a9SAlexander Graf     WRITE_DATA,
3073582b55a9SAlexander Graf     FLUSH_CACHE,
3074582b55a9SAlexander Graf };
3075582b55a9SAlexander Graf 
307675693e14SPeter Maydell static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
307775693e14SPeter Maydell                                                            hwaddr addr,
307875693e14SPeter Maydell                                                            MemTxAttrs attrs,
3079daa3dda4SPhilippe Mathieu-Daudé                                                            const void *ptr,
30800c249ff7SLi Zhijian                                                            hwaddr len,
308175693e14SPeter Maydell                                                            enum write_rom_type type)
3082d0ecd2aaSbellard {
3083149f54b5SPaolo Bonzini     hwaddr l;
308420804676SPhilippe Mathieu-Daudé     uint8_t *ram_ptr;
3085149f54b5SPaolo Bonzini     hwaddr addr1;
30865c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3087daa3dda4SPhilippe Mathieu-Daudé     const uint8_t *buf = ptr;
3088d0ecd2aaSbellard 
3089694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
3090d0ecd2aaSbellard     while (len > 0) {
3091d0ecd2aaSbellard         l = len;
309275693e14SPeter Maydell         mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
3093d0ecd2aaSbellard 
30945c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
30955c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
3096b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
3097d0ecd2aaSbellard         } else {
3098d0ecd2aaSbellard             /* ROM/RAM case */
309920804676SPhilippe Mathieu-Daudé             ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3100582b55a9SAlexander Graf             switch (type) {
3101582b55a9SAlexander Graf             case WRITE_DATA:
310220804676SPhilippe Mathieu-Daudé                 memcpy(ram_ptr, buf, l);
3103845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
3104582b55a9SAlexander Graf                 break;
3105582b55a9SAlexander Graf             case FLUSH_CACHE:
31061da8de39SRichard Henderson                 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
3107582b55a9SAlexander Graf                 break;
3108582b55a9SAlexander Graf             }
3109d0ecd2aaSbellard         }
3110d0ecd2aaSbellard         len -= l;
3111d0ecd2aaSbellard         buf += l;
3112d0ecd2aaSbellard         addr += l;
3113d0ecd2aaSbellard     }
311475693e14SPeter Maydell     return MEMTX_OK;
3115d0ecd2aaSbellard }
3116d0ecd2aaSbellard 
3117582b55a9SAlexander Graf /* Used for ROM loading: can write to both RAM and ROM. */
31183c8133f9SPeter Maydell MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
31193c8133f9SPeter Maydell                                     MemTxAttrs attrs,
3120daa3dda4SPhilippe Mathieu-Daudé                                     const void *buf, hwaddr len)
3121582b55a9SAlexander Graf {
31223c8133f9SPeter Maydell     return address_space_write_rom_internal(as, addr, attrs,
312375693e14SPeter Maydell                                             buf, len, WRITE_DATA);
3124582b55a9SAlexander Graf }
3125582b55a9SAlexander Graf 
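/*
 * Illustrative sketch (not part of this file): a firmware loader placing a
 * blob into a region that may be ROM.  address_space_write() would let the
 * write be discarded for ROM; the _rom variant above copies into the
 * backing RAM directly.  "blob" and "size" are hypothetical parameters.
 */
static bool example_load_firmware(AddressSpace *as, hwaddr base,
                                  const void *blob, hwaddr size)
{
    return address_space_write_rom(as, base, MEMTXATTRS_UNSPECIFIED,
                                   blob, size) == MEMTX_OK;
}
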
31260c249ff7SLi Zhijian void cpu_flush_icache_range(hwaddr start, hwaddr len)
3127582b55a9SAlexander Graf {
3128582b55a9SAlexander Graf     /*
3129582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
3130582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
3131582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
3132582b55a9SAlexander Graf      * the host's instruction cache at least.
3133582b55a9SAlexander Graf      */
3134582b55a9SAlexander Graf     if (tcg_enabled()) {
3135582b55a9SAlexander Graf         return;
3136582b55a9SAlexander Graf     }
3137582b55a9SAlexander Graf 
313875693e14SPeter Maydell     address_space_write_rom_internal(&address_space_memory,
313975693e14SPeter Maydell                                      start, MEMTXATTRS_UNSPECIFIED,
314075693e14SPeter Maydell                                      NULL, len, FLUSH_CACHE);
3141582b55a9SAlexander Graf }
3142582b55a9SAlexander Graf 
3143637b0aa1SMattias Nissler /*
3144637b0aa1SMattias Nissler  * A magic value stored in the first 8 bytes of the bounce buffer struct. Used
3145637b0aa1SMattias Nissler  * to detect illegal pointers passed to address_space_unmap.
3146637b0aa1SMattias Nissler  */
3147637b0aa1SMattias Nissler #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed
3148637b0aa1SMattias Nissler 
3149637b0aa1SMattias Nissler typedef struct {
3150637b0aa1SMattias Nissler     uint64_t magic;
3151637b0aa1SMattias Nissler     MemoryRegion *mr;
3152637b0aa1SMattias Nissler     hwaddr addr;
3153637b0aa1SMattias Nissler     size_t len;
3154637b0aa1SMattias Nissler     uint8_t buffer[];
3155637b0aa1SMattias Nissler } BounceBuffer;
3156637b0aa1SMattias Nissler 
315769e78f1bSMattias Nissler static void
315869e78f1bSMattias Nissler address_space_unregister_map_client_do(AddressSpaceMapClient *client)
3159ba223c29Saliguori {
316072cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
31617267c094SAnthony Liguori     g_free(client);
3162ba223c29Saliguori }
3163ba223c29Saliguori 
31645c627197SMattias Nissler static void address_space_notify_map_clients_locked(AddressSpace *as)
3165ba223c29Saliguori {
316669e78f1bSMattias Nissler     AddressSpaceMapClient *client;
3167ba223c29Saliguori 
316869e78f1bSMattias Nissler     while (!QLIST_EMPTY(&as->map_client_list)) {
316969e78f1bSMattias Nissler         client = QLIST_FIRST(&as->map_client_list);
3170e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
31715c627197SMattias Nissler         address_space_unregister_map_client_do(client);
3172ba223c29Saliguori     }
3173ba223c29Saliguori }
3174ba223c29Saliguori 
31755c627197SMattias Nissler void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
3176d0ecd2aaSbellard {
317769e78f1bSMattias Nissler     AddressSpaceMapClient *client = g_malloc(sizeof(*client));
3178d0ecd2aaSbellard 
317969e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3180e95205e1SFam Zheng     client->bh = bh;
318169e78f1bSMattias Nissler     QLIST_INSERT_HEAD(&as->map_client_list, client, link);
3182637b0aa1SMattias Nissler     /* Write map_client_list before reading bounce_buffer_size. */
318333828ca1SPaolo Bonzini     smp_mb();
3184637b0aa1SMattias Nissler     if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) {
31855c627197SMattias Nissler         address_space_notify_map_clients_locked(as);
318633b6c2edSFam Zheng     }
3187d0ecd2aaSbellard }
3188d0ecd2aaSbellard 
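/*
 * Illustrative sketch (not part of this file): retrying a failed
 * address_space_map() once bounce-buffer space frees up, in the style of
 * QEMU's DMA helpers.  ExampleDMAJob and both callbacks are hypothetical;
 * qemu_bh_new() is the real main-loop API.
 */
typedef struct ExampleDMAJob {
    AddressSpace *as;
    QEMUBH *bh;
} ExampleDMAJob;

static void example_retry_map(void *opaque)
{
    ExampleDMAJob *job = opaque;

    /*
     * The registration was already consumed by
     * address_space_notify_map_clients_locked() above; retry the mapping
     * here and call example_map_failed() again if it still returns NULL.
     */
    (void)job;
}

static void example_map_failed(ExampleDMAJob *job)
{
    job->bh = qemu_bh_new(example_retry_map, job);
    address_space_register_map_client(job->as, job->bh);
}
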
318938e047b5SFam Zheng void cpu_exec_init_all(void)
319038e047b5SFam Zheng {
319138e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
319220bccb82SPeter Maydell     /* The data structures we set up here depend on knowing the page size,
319320bccb82SPeter Maydell      * so no more changes can be made after this point.
319420bccb82SPeter Maydell      * In an ideal world, nothing we did before we had finished the
319520bccb82SPeter Maydell      * machine setup would care about the target page size, and we could
319620bccb82SPeter Maydell      * do this much later, rather than requiring board models to state
319720bccb82SPeter Maydell      * up front what their requirements are.
319820bccb82SPeter Maydell      */
319920bccb82SPeter Maydell     finalize_target_page_bits();
320038e047b5SFam Zheng     io_mem_init();
3201680a4783SPaolo Bonzini     memory_map_init();
320238e047b5SFam Zheng }
320338e047b5SFam Zheng 
32045c627197SMattias Nissler void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
3205d0ecd2aaSbellard {
320669e78f1bSMattias Nissler     AddressSpaceMapClient *client;
3207d0ecd2aaSbellard 
320869e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
320969e78f1bSMattias Nissler     QLIST_FOREACH(client, &as->map_client_list, link) {
3210e95205e1SFam Zheng         if (client->bh == bh) {
32115c627197SMattias Nissler             address_space_unregister_map_client_do(client);
3212e95205e1SFam Zheng             break;
3213e95205e1SFam Zheng         }
3214e95205e1SFam Zheng     }
3215d0ecd2aaSbellard }
3216d0ecd2aaSbellard 
32175c627197SMattias Nissler static void address_space_notify_map_clients(AddressSpace *as)
3218d0ecd2aaSbellard {
321969e78f1bSMattias Nissler     QEMU_LOCK_GUARD(&as->map_client_list_lock);
32205c627197SMattias Nissler     address_space_notify_map_clients_locked(as);
32216d16c2f8Saliguori }
32226d16c2f8Saliguori 
32230c249ff7SLi Zhijian static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
3224eace72b7SPeter Maydell                                   bool is_write, MemTxAttrs attrs)
322551644ab7SPaolo Bonzini {
32265c8a00ceSPaolo Bonzini     MemoryRegion *mr;
322751644ab7SPaolo Bonzini     hwaddr l, xlat;
322851644ab7SPaolo Bonzini 
322951644ab7SPaolo Bonzini     while (len > 0) {
323051644ab7SPaolo Bonzini         l = len;
3231efa99a2fSPeter Maydell         mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
32325c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
32335c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
3234eace72b7SPeter Maydell             if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
323551644ab7SPaolo Bonzini                 return false;
323651644ab7SPaolo Bonzini             }
323751644ab7SPaolo Bonzini         }
323851644ab7SPaolo Bonzini 
323951644ab7SPaolo Bonzini         len -= l;
324051644ab7SPaolo Bonzini         addr += l;
324151644ab7SPaolo Bonzini     }
324251644ab7SPaolo Bonzini     return true;
324351644ab7SPaolo Bonzini }
324451644ab7SPaolo Bonzini 
324516620684SAlexey Kardashevskiy bool address_space_access_valid(AddressSpace *as, hwaddr addr,
32460c249ff7SLi Zhijian                                 hwaddr len, bool is_write,
3247fddffa42SPeter Maydell                                 MemTxAttrs attrs)
324816620684SAlexey Kardashevskiy {
324911e732a5SPaolo Bonzini     FlatView *fv;
325011e732a5SPaolo Bonzini 
3251694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
325211e732a5SPaolo Bonzini     fv = address_space_to_flatview(as);
325358e74682SPhilippe Mathieu-Daudé     return flatview_access_valid(fv, addr, len, is_write, attrs);
325416620684SAlexey Kardashevskiy }
325516620684SAlexey Kardashevskiy 
3256715c31ecSPaolo Bonzini static hwaddr
325716620684SAlexey Kardashevskiy flatview_extend_translation(FlatView *fv, hwaddr addr,
325816620684SAlexey Kardashevskiy                             hwaddr target_len,
3259715c31ecSPaolo Bonzini                             MemoryRegion *mr, hwaddr base, hwaddr len,
326053d0790dSPeter Maydell                             bool is_write, MemTxAttrs attrs)
3261715c31ecSPaolo Bonzini {
3262715c31ecSPaolo Bonzini     hwaddr done = 0;
3263715c31ecSPaolo Bonzini     hwaddr xlat;
3264715c31ecSPaolo Bonzini     MemoryRegion *this_mr;
3265715c31ecSPaolo Bonzini 
3266715c31ecSPaolo Bonzini     for (;;) {
3267715c31ecSPaolo Bonzini         target_len -= len;
3268715c31ecSPaolo Bonzini         addr += len;
3269715c31ecSPaolo Bonzini         done += len;
3270715c31ecSPaolo Bonzini         if (target_len == 0) {
3271715c31ecSPaolo Bonzini             return done;
3272715c31ecSPaolo Bonzini         }
3273715c31ecSPaolo Bonzini 
3274715c31ecSPaolo Bonzini         len = target_len;
327516620684SAlexey Kardashevskiy         this_mr = flatview_translate(fv, addr, &xlat,
3276efa99a2fSPeter Maydell                                      &len, is_write, attrs);
3277715c31ecSPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
3278715c31ecSPaolo Bonzini             return done;
3279715c31ecSPaolo Bonzini         }
3280715c31ecSPaolo Bonzini     }
3281715c31ecSPaolo Bonzini }
3282715c31ecSPaolo Bonzini 
32836d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
32846d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
32856d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
32866d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
32875c627197SMattias Nissler  * Use address_space_register_map_client() to know when retrying the map
32885c627197SMattias Nissler  * operation is likely to succeed.
32896d16c2f8Saliguori  */
3290ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
3291a8170e5eSAvi Kivity                         hwaddr addr,
3292a8170e5eSAvi Kivity                         hwaddr *plen,
3293f26404fbSPeter Maydell                         bool is_write,
3294f26404fbSPeter Maydell                         MemTxAttrs attrs)
32956d16c2f8Saliguori {
3296a8170e5eSAvi Kivity     hwaddr len = *plen;
3297715c31ecSPaolo Bonzini     hwaddr l, xlat;
3298715c31ecSPaolo Bonzini     MemoryRegion *mr;
3299ad0c60faSPaolo Bonzini     FlatView *fv;
33006d16c2f8Saliguori 
3301d44fe13bSAlex Bennée     trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs);
3302d44fe13bSAlex Bennée 
3303e3127ae0SPaolo Bonzini     if (len == 0) {
3304e3127ae0SPaolo Bonzini         return NULL;
3305e3127ae0SPaolo Bonzini     }
3306e3127ae0SPaolo Bonzini 
33076d16c2f8Saliguori     l = len;
3308694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
3309ad0c60faSPaolo Bonzini     fv = address_space_to_flatview(as);
3310efa99a2fSPeter Maydell     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
331141063e1eSPaolo Bonzini 
33125c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
3313637b0aa1SMattias Nissler         size_t used = qatomic_read(&as->bounce_buffer_size);
3314637b0aa1SMattias Nissler         for (;;) {
3315637b0aa1SMattias Nissler             hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l);
3316637b0aa1SMattias Nissler             size_t new_size = used + alloc;
3317637b0aa1SMattias Nissler             size_t actual =
3318637b0aa1SMattias Nissler                 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size);
3319637b0aa1SMattias Nissler             if (actual == used) {
3320637b0aa1SMattias Nissler                 l = alloc;
3321637b0aa1SMattias Nissler                 break;
3322637b0aa1SMattias Nissler             }
3323637b0aa1SMattias Nissler             used = actual;
3324637b0aa1SMattias Nissler         }
3325637b0aa1SMattias Nissler 
3326637b0aa1SMattias Nissler         if (l == 0) {
332777f55eacSPrasad J Pandit             *plen = 0;
3328e3127ae0SPaolo Bonzini             return NULL;
33296d16c2f8Saliguori         }
3330d3e71559SPaolo Bonzini 
3331637b0aa1SMattias Nissler         BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer));
3332637b0aa1SMattias Nissler         bounce->magic = BOUNCE_BUFFER_MAGIC;
3333d3e71559SPaolo Bonzini         memory_region_ref(mr);
3334637b0aa1SMattias Nissler         bounce->mr = mr;
3335637b0aa1SMattias Nissler         bounce->addr = addr;
3336637b0aa1SMattias Nissler         bounce->len = l;
3337637b0aa1SMattias Nissler 
33386d16c2f8Saliguori         if (!is_write) {
3339d8d5ca40SFea.Wang             flatview_read(fv, addr, attrs,
3340637b0aa1SMattias Nissler                           bounce->buffer, l);
33416d16c2f8Saliguori         }
334238bee5dcSStefano Stabellini 
334338bee5dcSStefano Stabellini         *plen = l;
3344637b0aa1SMattias Nissler         return bounce->buffer;
33456d16c2f8Saliguori     }
3346e3127ae0SPaolo Bonzini 
3347d3e71559SPaolo Bonzini     memory_region_ref(mr);
334816620684SAlexey Kardashevskiy     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
334953d0790dSPeter Maydell                                         l, is_write, attrs);
3350fc1c8344SAlexander Bulekov     fuzz_dma_read_cb(addr, *plen, mr);
33515a5585f4SEdgar E. Iglesias     return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write);
33526d16c2f8Saliguori }
33536d16c2f8Saliguori 
3354ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
3355ae5883abSPhilippe Mathieu-Daudé  * Will also mark the memory as dirty if is_write is true.  access_len gives
33566d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
33576d16c2f8Saliguori  */
3358a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3359ae5883abSPhilippe Mathieu-Daudé                          bool is_write, hwaddr access_len)
33606d16c2f8Saliguori {
3361d3e71559SPaolo Bonzini     MemoryRegion *mr;
33627443b437SPaolo Bonzini     ram_addr_t addr1;
3363d3e71559SPaolo Bonzini 
336407bdaa41SPaolo Bonzini     mr = memory_region_from_host(buffer, &addr1);
3365637b0aa1SMattias Nissler     if (mr != NULL) {
3366d3e71559SPaolo Bonzini         if (is_write) {
3367845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, access_len);
33686d16c2f8Saliguori         }
3369868bb33fSJan Kiszka         if (xen_enabled()) {
3370e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
3371050a0ddfSAnthony PERARD         }
3372d3e71559SPaolo Bonzini         memory_region_unref(mr);
33736d16c2f8Saliguori         return;
33746d16c2f8Saliguori     }
3375637b0aa1SMattias Nissler 
3377637b0aa1SMattias Nissler     BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer);
3378637b0aa1SMattias Nissler     assert(bounce->magic == BOUNCE_BUFFER_MAGIC);
3379637b0aa1SMattias Nissler 
33806d16c2f8Saliguori     if (is_write) {
3381637b0aa1SMattias Nissler         address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED,
3382637b0aa1SMattias Nissler                             bounce->buffer, access_len);
33836d16c2f8Saliguori     }
3384637b0aa1SMattias Nissler 
3385637b0aa1SMattias Nissler     qatomic_sub(&as->bounce_buffer_size, bounce->len);
3386637b0aa1SMattias Nissler     bounce->magic = ~BOUNCE_BUFFER_MAGIC;
3387637b0aa1SMattias Nissler     memory_region_unref(bounce->mr);
3388637b0aa1SMattias Nissler     g_free(bounce);
3389637b0aa1SMattias Nissler     /* Write bounce_buffer_size before reading map_client_list. */
3390637b0aa1SMattias Nissler     smp_mb();
33915c627197SMattias Nissler     address_space_notify_map_clients(as);
33926d16c2f8Saliguori }
3393d0ecd2aaSbellard 
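/*
 * Illustrative sketch (not part of this file): the canonical map/unmap
 * pattern for a device doing DMA.  When the target is not direct RAM the
 * map above transparently falls back to a bounce buffer, so callers must
 * handle *plen coming back shorter than requested and NULL on
 * bounce-buffer exhaustion (see address_space_register_map_client()).
 */
static void example_dma_fill(AddressSpace *as, hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = address_space_map(as, addr, &plen, true,
                                       MEMTXATTRS_UNSPECIFIED);

        if (!host) {
            break; /* resources exhausted; retry via a map client */
        }
        memset(host, 0, plen); /* the device's writes go here */
        address_space_unmap(as, host, plen, true, plen);
        addr += plen;
        len -= plen;
    }
}
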
3394a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
3395a8170e5eSAvi Kivity                               hwaddr *plen,
339628c80bfeSPhilippe Mathieu-Daudé                               bool is_write)
3397ac1970fbSAvi Kivity {
3398f26404fbSPeter Maydell     return address_space_map(&address_space_memory, addr, plen, is_write,
3399f26404fbSPeter Maydell                              MEMTXATTRS_UNSPECIFIED);
3400ac1970fbSAvi Kivity }
3401ac1970fbSAvi Kivity 
3402a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
340328c80bfeSPhilippe Mathieu-Daudé                                bool is_write, hwaddr access_len)
3404ac1970fbSAvi Kivity {
3405ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3406ac1970fbSAvi Kivity }
3407ac1970fbSAvi Kivity 
34080ce265ffSPaolo Bonzini #define ARG1_DECL                AddressSpace *as
34090ce265ffSPaolo Bonzini #define ARG1                     as
34100ce265ffSPaolo Bonzini #define SUFFIX
34110ce265ffSPaolo Bonzini #define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
34120ce265ffSPaolo Bonzini #define RCU_READ_LOCK(...)       rcu_read_lock()
34130ce265ffSPaolo Bonzini #define RCU_READ_UNLOCK(...)     rcu_read_unlock()
3414139c1837SPaolo Bonzini #include "memory_ldst.c.inc"
34151e78bcc1SAlexander Graf 
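/*
 * The include above instantiates the fixed-size load/store helpers
 * (address_space_ldl(), address_space_stq(), ...) for plain address
 * spaces.  Illustrative sketch (not part of this file) of reading one
 * little-endian 32-bit value from guest memory with them:
 */
static uint32_t example_read_u32(AddressSpace *as, hwaddr addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);

    return res == MEMTX_OK ? val : 0;
}
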
34161f4e496eSPaolo Bonzini int64_t address_space_cache_init(MemoryRegionCache *cache,
34171f4e496eSPaolo Bonzini                                  AddressSpace *as,
34181f4e496eSPaolo Bonzini                                  hwaddr addr,
34191f4e496eSPaolo Bonzini                                  hwaddr len,
34201f4e496eSPaolo Bonzini                                  bool is_write)
34211f4e496eSPaolo Bonzini {
342248564041SPaolo Bonzini     AddressSpaceDispatch *d;
342348564041SPaolo Bonzini     hwaddr l;
342448564041SPaolo Bonzini     MemoryRegion *mr;
34254bfb024bSPaolo Bonzini     Int128 diff;
342648564041SPaolo Bonzini 
342748564041SPaolo Bonzini     assert(len > 0);
342848564041SPaolo Bonzini 
342948564041SPaolo Bonzini     l = len;
343048564041SPaolo Bonzini     cache->fv = address_space_get_flatview(as);
343148564041SPaolo Bonzini     d = flatview_to_dispatch(cache->fv);
343248564041SPaolo Bonzini     cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
343348564041SPaolo Bonzini 
34344bfb024bSPaolo Bonzini     /*
34354bfb024bSPaolo Bonzini      * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
34364bfb024bSPaolo Bonzini      * Take that into account to compute how many bytes are there between
34374bfb024bSPaolo Bonzini      * cache->xlat and the end of the section.
34384bfb024bSPaolo Bonzini      */
34394bfb024bSPaolo Bonzini     diff = int128_sub(cache->mrs.size,
34404bfb024bSPaolo Bonzini                       int128_make64(cache->xlat - cache->mrs.offset_within_region));
34414bfb024bSPaolo Bonzini     l = int128_get64(int128_min(diff, int128_make64(l)));
34424bfb024bSPaolo Bonzini 
344348564041SPaolo Bonzini     mr = cache->mrs.mr;
344448564041SPaolo Bonzini     memory_region_ref(mr);
344548564041SPaolo Bonzini     if (memory_access_is_direct(mr, is_write)) {
344653d0790dSPeter Maydell         /* We don't care about the memory attributes here as we're only
344753d0790dSPeter Maydell          * doing this if we found actual RAM, which behaves the same
344853d0790dSPeter Maydell          * regardless of attributes; so UNSPECIFIED is fine.
344953d0790dSPeter Maydell          */
345048564041SPaolo Bonzini         l = flatview_extend_translation(cache->fv, addr, len, mr,
345153d0790dSPeter Maydell                                         cache->xlat, l, is_write,
345253d0790dSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
34535a5585f4SEdgar E. Iglesias         cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true,
34545a5585f4SEdgar E. Iglesias                                          is_write);
345548564041SPaolo Bonzini     } else {
345648564041SPaolo Bonzini         cache->ptr = NULL;
345748564041SPaolo Bonzini     }
345848564041SPaolo Bonzini 
345948564041SPaolo Bonzini     cache->len = l;
346048564041SPaolo Bonzini     cache->is_write = is_write;
346148564041SPaolo Bonzini     return l;
34621f4e496eSPaolo Bonzini }
34631f4e496eSPaolo Bonzini 
34641f4e496eSPaolo Bonzini void address_space_cache_invalidate(MemoryRegionCache *cache,
34651f4e496eSPaolo Bonzini                                     hwaddr addr,
34661f4e496eSPaolo Bonzini                                     hwaddr access_len)
34671f4e496eSPaolo Bonzini {
346848564041SPaolo Bonzini     assert(cache->is_write);
346948564041SPaolo Bonzini     if (likely(cache->ptr)) {
347048564041SPaolo Bonzini         invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
347148564041SPaolo Bonzini     }
34721f4e496eSPaolo Bonzini }
34731f4e496eSPaolo Bonzini 
34741f4e496eSPaolo Bonzini void address_space_cache_destroy(MemoryRegionCache *cache)
34751f4e496eSPaolo Bonzini {
347648564041SPaolo Bonzini     if (!cache->mrs.mr) {
347748564041SPaolo Bonzini         return;
347848564041SPaolo Bonzini     }
347948564041SPaolo Bonzini 
348048564041SPaolo Bonzini     if (xen_enabled()) {
348148564041SPaolo Bonzini         xen_invalidate_map_cache_entry(cache->ptr);
348248564041SPaolo Bonzini     }
348348564041SPaolo Bonzini     memory_region_unref(cache->mrs.mr);
348448564041SPaolo Bonzini     flatview_unref(cache->fv);
348548564041SPaolo Bonzini     cache->mrs.mr = NULL;
348648564041SPaolo Bonzini     cache->fv = NULL;
348748564041SPaolo Bonzini }
348848564041SPaolo Bonzini 
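/*
 * Illustrative sketch (not part of this file): using a MemoryRegionCache
 * for repeated accesses to one guest structure, e.g. a virtqueue ring.
 * address_space_read_cached()/address_space_write_cached() are the inline
 * fast paths declared in "exec/memory.h"; they only reach the
 * _cached_slow variants below when the target is not direct RAM.
 */
static void example_cached_access(AddressSpace *as, hwaddr ring, hwaddr size)
{
    MemoryRegionCache cache;
    uint16_t idx = 0;
    /* Returns the number of bytes actually covered, possibly < size. */
    int64_t covered = address_space_cache_init(&cache, as, ring, size, true);

    if (covered >= (int64_t)sizeof(idx)) {
        address_space_read_cached(&cache, 0, &idx, sizeof(idx));
        idx++;
        address_space_write_cached(&cache, 0, &idx, sizeof(idx));
        address_space_cache_invalidate(&cache, 0, sizeof(idx));
    }
    address_space_cache_destroy(&cache);
}
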
348948564041SPaolo Bonzini /* Called from RCU critical section.  This function has the same
349048564041SPaolo Bonzini  * semantics as address_space_translate, but it only works on a
349148564041SPaolo Bonzini  * predefined range of a MemoryRegion that was mapped with
349248564041SPaolo Bonzini  * address_space_cache_init.
349348564041SPaolo Bonzini  */
349448564041SPaolo Bonzini static inline MemoryRegion *address_space_translate_cached(
349548564041SPaolo Bonzini     MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
3496bc6b1cecSPeter Maydell     hwaddr *plen, bool is_write, MemTxAttrs attrs)
349748564041SPaolo Bonzini {
349848564041SPaolo Bonzini     MemoryRegionSection section;
349948564041SPaolo Bonzini     MemoryRegion *mr;
350048564041SPaolo Bonzini     IOMMUMemoryRegion *iommu_mr;
350148564041SPaolo Bonzini     AddressSpace *target_as;
350248564041SPaolo Bonzini 
350348564041SPaolo Bonzini     assert(!cache->ptr);
350448564041SPaolo Bonzini     *xlat = addr + cache->xlat;
350548564041SPaolo Bonzini 
350648564041SPaolo Bonzini     mr = cache->mrs.mr;
350748564041SPaolo Bonzini     iommu_mr = memory_region_get_iommu(mr);
350848564041SPaolo Bonzini     if (!iommu_mr) {
350948564041SPaolo Bonzini         /* MMIO region.  */
351048564041SPaolo Bonzini         return mr;
351148564041SPaolo Bonzini     }
351248564041SPaolo Bonzini 
351348564041SPaolo Bonzini     section = address_space_translate_iommu(iommu_mr, xlat, plen,
351448564041SPaolo Bonzini                                             NULL, is_write, true,
35152f7b009cSPeter Maydell                                             &target_as, attrs);
351648564041SPaolo Bonzini     return section.mr;
351748564041SPaolo Bonzini }
351848564041SPaolo Bonzini 
351947293c92SJonathan Cameron /* Called within RCU critical section.  */
352047293c92SJonathan Cameron static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs,
352147293c92SJonathan Cameron                                                        const void *ptr,
352247293c92SJonathan Cameron                                                        hwaddr len,
352347293c92SJonathan Cameron                                                        hwaddr mr_addr,
352447293c92SJonathan Cameron                                                        hwaddr l,
352547293c92SJonathan Cameron                                                        MemoryRegion *mr)
352647293c92SJonathan Cameron {
352747293c92SJonathan Cameron     MemTxResult result = MEMTX_OK;
352847293c92SJonathan Cameron     const uint8_t *buf = ptr;
352947293c92SJonathan Cameron 
353047293c92SJonathan Cameron     for (;;) {
353147293c92SJonathan Cameron         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
353247293c92SJonathan Cameron                                                mr);
353347293c92SJonathan Cameron 
353447293c92SJonathan Cameron         len -= l;
353547293c92SJonathan Cameron         buf += l;
353647293c92SJonathan Cameron         mr_addr += l;
353747293c92SJonathan Cameron 
353847293c92SJonathan Cameron         if (!len) {
353947293c92SJonathan Cameron             break;
354047293c92SJonathan Cameron         }
354147293c92SJonathan Cameron 
354247293c92SJonathan Cameron         l = len;
354347293c92SJonathan Cameron     }
354447293c92SJonathan Cameron 
354547293c92SJonathan Cameron     return result;
354647293c92SJonathan Cameron }
354747293c92SJonathan Cameron 
354847293c92SJonathan Cameron /* Called within RCU critical section.  */
354947293c92SJonathan Cameron static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs,
355047293c92SJonathan Cameron                                                       void *ptr, hwaddr len,
355147293c92SJonathan Cameron                                                       hwaddr mr_addr, hwaddr l,
355247293c92SJonathan Cameron                                                       MemoryRegion *mr)
355347293c92SJonathan Cameron {
355447293c92SJonathan Cameron     MemTxResult result = MEMTX_OK;
355547293c92SJonathan Cameron     uint8_t *buf = ptr;
355647293c92SJonathan Cameron 
355747293c92SJonathan Cameron     for (;;) {
355847293c92SJonathan Cameron         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
355947293c92SJonathan Cameron         len -= l;
356047293c92SJonathan Cameron         buf += l;
356147293c92SJonathan Cameron         mr_addr += l;
356247293c92SJonathan Cameron 
356347293c92SJonathan Cameron         if (!len) {
356447293c92SJonathan Cameron             break;
356547293c92SJonathan Cameron         }
356647293c92SJonathan Cameron         l = len;
356747293c92SJonathan Cameron     }
356847293c92SJonathan Cameron 
356947293c92SJonathan Cameron     return result;
357047293c92SJonathan Cameron }
357147293c92SJonathan Cameron 
357248564041SPaolo Bonzini /* Called from RCU critical section. address_space_read_cached uses this
357348564041SPaolo Bonzini  * out-of-line function when the target is an MMIO or IOMMU region.
357448564041SPaolo Bonzini  */
357538df19faSPhilippe Mathieu-Daudé MemTxResult
357648564041SPaolo Bonzini address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
35770c249ff7SLi Zhijian                                void *buf, hwaddr len)
357848564041SPaolo Bonzini {
35794c7c8563SJonathan Cameron     hwaddr mr_addr, l;
358048564041SPaolo Bonzini     MemoryRegion *mr;
358148564041SPaolo Bonzini 
358248564041SPaolo Bonzini     l = len;
35834c7c8563SJonathan Cameron     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false,
3584bc6b1cecSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
358547293c92SJonathan Cameron     return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED,
358647293c92SJonathan Cameron                                               buf, len, mr_addr, l, mr);
358748564041SPaolo Bonzini }
358848564041SPaolo Bonzini 
358948564041SPaolo Bonzini /* Called from RCU critical section. address_space_write_cached uses this
359048564041SPaolo Bonzini  * out-of-line function when the target is an MMIO or IOMMU region.
359148564041SPaolo Bonzini  */
359238df19faSPhilippe Mathieu-Daudé MemTxResult
359348564041SPaolo Bonzini address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
35940c249ff7SLi Zhijian                                 const void *buf, hwaddr len)
359548564041SPaolo Bonzini {
35964c7c8563SJonathan Cameron     hwaddr mr_addr, l;
359748564041SPaolo Bonzini     MemoryRegion *mr;
359848564041SPaolo Bonzini 
359948564041SPaolo Bonzini     l = len;
36004c7c8563SJonathan Cameron     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true,
3601bc6b1cecSPeter Maydell                                         MEMTXATTRS_UNSPECIFIED);
360247293c92SJonathan Cameron     return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED,
360347293c92SJonathan Cameron                                                buf, len, mr_addr, l, mr);
36041f4e496eSPaolo Bonzini }
36051f4e496eSPaolo Bonzini 
36061f4e496eSPaolo Bonzini #define ARG1_DECL                MemoryRegionCache *cache
36071f4e496eSPaolo Bonzini #define ARG1                     cache
360848564041SPaolo Bonzini #define SUFFIX                   _cached_slow
360948564041SPaolo Bonzini #define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
361048564041SPaolo Bonzini #define RCU_READ_LOCK()          ((void)0)
361148564041SPaolo Bonzini #define RCU_READ_UNLOCK()        ((void)0)
3612139c1837SPaolo Bonzini #include "memory_ldst.c.inc"
36131f4e496eSPaolo Bonzini 
36145e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
361573842ef0SPhilippe Mathieu-Daudé int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
361673842ef0SPhilippe Mathieu-Daudé                         void *ptr, size_t len, bool is_write)
361713eb76e0Sbellard {
3618a8170e5eSAvi Kivity     hwaddr phys_addr;
361973842ef0SPhilippe Mathieu-Daudé     vaddr l, page;
3620d7ef71efSPhilippe Mathieu-Daudé     uint8_t *buf = ptr;
362113eb76e0Sbellard 
362279ca7a1bSChristian Borntraeger     cpu_synchronize_state(cpu);
362313eb76e0Sbellard     while (len > 0) {
36245232e4c7SPeter Maydell         int asidx;
36255232e4c7SPeter Maydell         MemTxAttrs attrs;
3626ddfc8b96SPhilippe Mathieu-Daudé         MemTxResult res;
36275232e4c7SPeter Maydell 
362813eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
36295232e4c7SPeter Maydell         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
36305232e4c7SPeter Maydell         asidx = cpu_asidx_from_attrs(cpu, attrs);
363113eb76e0Sbellard         /* if no physical page mapped, return an error */
363213eb76e0Sbellard         if (phys_addr == -1) {
363313eb76e0Sbellard             return -1;
363313eb76e0Sbellard         }
363413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
363513eb76e0Sbellard         if (l > len) {
363613eb76e0Sbellard             l = len;
363613eb76e0Sbellard         }
36375e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
36382e38847bSEdgar E. Iglesias         if (is_write) {
3639ddfc8b96SPhilippe Mathieu-Daudé             res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
3640ea7a5330SPeter Maydell                                           attrs, buf, l);
36412e38847bSEdgar E. Iglesias         } else {
3642ddfc8b96SPhilippe Mathieu-Daudé             res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
3643ddfc8b96SPhilippe Mathieu-Daudé                                      attrs, buf, l);
3644ddfc8b96SPhilippe Mathieu-Daudé         }
3645ddfc8b96SPhilippe Mathieu-Daudé         if (res != MEMTX_OK) {
3646ddfc8b96SPhilippe Mathieu-Daudé             return -1;
36472e38847bSEdgar E. Iglesias         }
364813eb76e0Sbellard         len -= l;
364913eb76e0Sbellard         buf += l;
365013eb76e0Sbellard         addr += l;
365113eb76e0Sbellard     }
365213eb76e0Sbellard     return 0;
365313eb76e0Sbellard }
3654038629a6SDr. David Alan Gilbert 
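/*
 * Illustrative sketch (not part of this file): how a debugger front end
 * reads guest *virtual* memory.  This is essentially what the gdbstub
 * does for a memory-read packet; "cpu" would come from the debug session.
 */
static bool example_debug_read(CPUState *cpu, vaddr addr,
                               void *buf, size_t len)
{
    return cpu_memory_rw_debug(cpu, addr, buf, len, false) == 0;
}
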
3655a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
365676f35538SWen Congyang {
36575c8a00ceSPaolo Bonzini     MemoryRegion *mr;
3658149f54b5SPaolo Bonzini     hwaddr l = 1;
365976f35538SWen Congyang 
3660694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
36615c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
3662bc6b1cecSPeter Maydell                                  phys_addr, &phys_addr, &l, false,
3663bc6b1cecSPeter Maydell                                  MEMTXATTRS_UNSPECIFIED);
366476f35538SWen Congyang 
366566997c42SMarkus Armbruster     return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
366676f35538SWen Congyang }
3667bd2fa51fSMichael R. Hines 
3668e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3669bd2fa51fSMichael R. Hines {
3670bd2fa51fSMichael R. Hines     RAMBlock *block;
3671e3807054SDr. David Alan Gilbert     int ret = 0;
3672bd2fa51fSMichael R. Hines 
3673694ea274SDr. David Alan Gilbert     RCU_READ_LOCK_GUARD();
367499e15582SPeter Xu     RAMBLOCK_FOREACH(block) {
3675754cb9c0SYury Kotov         ret = func(block, opaque);
3676e3807054SDr. David Alan Gilbert         if (ret) {
3677e3807054SDr. David Alan Gilbert             break;
3678e3807054SDr. David Alan Gilbert         }
3679bd2fa51fSMichael R. Hines     }
3680e3807054SDr. David Alan Gilbert     return ret;
3681bd2fa51fSMichael R. Hines }
3682d3a5038cSDr. David Alan Gilbert 
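/*
 * Illustrative sketch (not part of this file): summing all guest RAM with
 * qemu_ram_foreach_block().  qemu_ram_get_used_length() is a real
 * accessor declared in "exec/cpu-common.h".
 */
static int example_count_block(RAMBlock *rb, void *opaque)
{
    uint64_t *total = opaque;

    *total += qemu_ram_get_used_length(rb);
    return 0; /* a non-zero return stops the iteration, see above */
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_block, &total);
    return total;
}
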
3683d3a5038cSDr. David Alan Gilbert /*
3684d3a5038cSDr. David Alan Gilbert  * Unmap pages of memory from start to start+length such that
3685d3a5038cSDr. David Alan Gilbert  * they (a) read as 0, and (b) trigger whatever fault mechanism
3686d3a5038cSDr. David Alan Gilbert  * the OS provides for postcopy.
3687d3a5038cSDr. David Alan Gilbert  * The pages must be unmapped by the end of the function.
3688d3a5038cSDr. David Alan Gilbert  * Returns: 0 on success, non-0 on failure
3689d3a5038cSDr. David Alan Gilbert  *
3690d3a5038cSDr. David Alan Gilbert  */
3691d3a5038cSDr. David Alan Gilbert int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3692d3a5038cSDr. David Alan Gilbert {
3693d3a5038cSDr. David Alan Gilbert     int ret = -1;
3694d3a5038cSDr. David Alan Gilbert 
3695d3a5038cSDr. David Alan Gilbert     uint8_t *host_startaddr = rb->host + start;
3696d3a5038cSDr. David Alan Gilbert 
3697619bd31dSMarc-André Lureau     if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3698ea18be78SXiaoyao Li         error_report("%s: Unaligned start address: %p",
3699ea18be78SXiaoyao Li                      __func__, host_startaddr);
3700d3a5038cSDr. David Alan Gilbert         goto err;
3701d3a5038cSDr. David Alan Gilbert     }
3702d3a5038cSDr. David Alan Gilbert 
3703dcdc4607SDavid Hildenbrand     if ((start + length) <= rb->max_length) {
3704db144f70SDr. David Alan Gilbert         bool need_madvise, need_fallocate;
3705619bd31dSMarc-André Lureau         if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3706ea18be78SXiaoyao Li             error_report("%s: Unaligned length: %zx", __func__, length);
3707d3a5038cSDr. David Alan Gilbert             goto err;
3708d3a5038cSDr. David Alan Gilbert         }
3709d3a5038cSDr. David Alan Gilbert 
3710d3a5038cSDr. David Alan Gilbert         errno = ENOTSUP; /* If we are missing MADVISE etc */
3711d3a5038cSDr. David Alan Gilbert 
3712db144f70SDr. David Alan Gilbert         /* The logic here is messy:
3713db144f70SDr. David Alan Gilbert          *    madvise DONTNEED fails for hugepages
3714db144f70SDr. David Alan Gilbert          *    fallocate works on hugepages and shmem
3715cdfa56c5SDavid Hildenbrand          *    shared anonymous memory requires madvise REMOVE
3716d3a5038cSDr. David Alan Gilbert          */
371780c3aeefSRichard Henderson         need_madvise = (rb->page_size == qemu_real_host_page_size());
3718db144f70SDr. David Alan Gilbert         need_fallocate = rb->fd != -1;
3719db144f70SDr. David Alan Gilbert         if (need_fallocate) {
3720db144f70SDr. David Alan Gilbert             /* For a file, this causes the area of the file to be zeroed
3721db144f70SDr. David Alan Gilbert              * if read; for hugetlbfs it also causes the area to be unmapped,
3722db144f70SDr. David Alan Gilbert              * so a userfault will trigger.
3723e2fa71f5SDr. David Alan Gilbert              */
3724e2fa71f5SDr. David Alan Gilbert #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
37251d44ff58SDavid Hildenbrand             /*
3726b2cccb52SDavid Hildenbrand              * fallocate() will fail with readonly files. Let's print a
3727b2cccb52SDavid Hildenbrand              * proper error message.
3728b2cccb52SDavid Hildenbrand              */
3729b2cccb52SDavid Hildenbrand             if (rb->flags & RAM_READONLY_FD) {
3730ea18be78SXiaoyao Li                 error_report("%s: Discarding RAM with readonly files is not"
3731ea18be78SXiaoyao Li                              " supported", __func__);
3732b2cccb52SDavid Hildenbrand                 goto err;
3734b2cccb52SDavid Hildenbrand             }
3735b2cccb52SDavid Hildenbrand             /*
37361d44ff58SDavid Hildenbrand              * We'll discard data from the actual file, even though we only
37371d44ff58SDavid Hildenbrand              * have a MAP_PRIVATE mapping, possibly messing with other
37381d44ff58SDavid Hildenbrand              * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
37391d44ff58SDavid Hildenbrand              * change that behavior without violating the promised
37401d44ff58SDavid Hildenbrand              * semantics of ram_block_discard_range().
37411d44ff58SDavid Hildenbrand              *
37421d44ff58SDavid Hildenbrand              * Only warn, because it works as long as nobody else uses that
37431d44ff58SDavid Hildenbrand              * file.
37441d44ff58SDavid Hildenbrand              */
37451d44ff58SDavid Hildenbrand             if (!qemu_ram_is_shared(rb)) {
3746ea18be78SXiaoyao Li                 warn_report_once("%s: Discarding RAM"
37471d44ff58SDavid Hildenbrand                                  " in private file mappings is possibly"
37481d44ff58SDavid Hildenbrand                                  " dangerous, because it will modify the"
37491d44ff58SDavid Hildenbrand                                  " underlying file and will affect other"
3750ea18be78SXiaoyao Li                                  " users of the file", __func__);
37511d44ff58SDavid Hildenbrand             }
37521d44ff58SDavid Hildenbrand 
3753e2fa71f5SDr. David Alan Gilbert             ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3754e2fa71f5SDr. David Alan Gilbert                             start, length);
3755db144f70SDr. David Alan Gilbert             if (ret) {
3756db144f70SDr. David Alan Gilbert                 ret = -errno;
3757ea18be78SXiaoyao Li                 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3758ea18be78SXiaoyao Li                              __func__, rb->idstr, start, length, ret);
3759db144f70SDr. David Alan Gilbert                 goto err;
3760db144f70SDr. David Alan Gilbert             }
3761db144f70SDr. David Alan Gilbert #else
3762db144f70SDr. David Alan Gilbert             ret = -ENOSYS;
3763ea18be78SXiaoyao Li             error_report("%s: fallocate not available/file"
3764db144f70SDr. David Alan Gilbert                          "%s:%" PRIx64 " +%zx (%d)",
3765ea18be78SXiaoyao Li                          __func__, rb->idstr, start, length, ret);
3766db144f70SDr. David Alan Gilbert             goto err;
3767e2fa71f5SDr. David Alan Gilbert #endif
3768e2fa71f5SDr. David Alan Gilbert         }
3769db144f70SDr. David Alan Gilbert         if (need_madvise) {
3770db144f70SDr. David Alan Gilbert             /* For normal RAM this causes it to be unmapped,
3771db144f70SDr. David Alan Gilbert              * for shared memory it causes the local mapping to disappear
3772db144f70SDr. David Alan Gilbert              * and to fall back on the file contents (which we just
3773db144f70SDr. David Alan Gilbert              * fallocate'd away).
3774db144f70SDr. David Alan Gilbert              */
3775db144f70SDr. David Alan Gilbert #if defined(CONFIG_MADVISE)
3776cdfa56c5SDavid Hildenbrand             if (qemu_ram_is_shared(rb) && rb->fd < 0) {
3777cdfa56c5SDavid Hildenbrand                 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE);
3778cdfa56c5SDavid Hildenbrand             } else {
3779cdfa56c5SDavid Hildenbrand                 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED);
3780cdfa56c5SDavid Hildenbrand             }
3781d3a5038cSDr. David Alan Gilbert             if (ret) {
3782d3a5038cSDr. David Alan Gilbert                 ret = -errno;
3783ea18be78SXiaoyao Li                 error_report("%s: Failed to discard range "
3784d3a5038cSDr. David Alan Gilbert                              "%s:%" PRIx64 " +%zx (%d)",
3785ea18be78SXiaoyao Li                              __func__, rb->idstr, start, length, ret);
3786db144f70SDr. David Alan Gilbert                 goto err;
3787d3a5038cSDr. David Alan Gilbert             }
3788db144f70SDr. David Alan Gilbert #else
3789db144f70SDr. David Alan Gilbert             ret = -ENOSYS;
3790ea18be78SXiaoyao Li             error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
3791ea18be78SXiaoyao Li                          __func__, rb->idstr, start, length, ret);
3792db144f70SDr. David Alan Gilbert             goto err;
3793db144f70SDr. David Alan Gilbert #endif
3794db144f70SDr. David Alan Gilbert         }
3795db144f70SDr. David Alan Gilbert         trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3796db144f70SDr. David Alan Gilbert                                       need_madvise, need_fallocate, ret);
3797d3a5038cSDr. David Alan Gilbert     } else {
3798ea18be78SXiaoyao Li         error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
3799ea18be78SXiaoyao Li                      __func__, rb->idstr, start, length, rb->max_length);
3800d3a5038cSDr. David Alan Gilbert     }
3801d3a5038cSDr. David Alan Gilbert 
3802d3a5038cSDr. David Alan Gilbert err:
3803d3a5038cSDr. David Alan Gilbert     return ret;
3804d3a5038cSDr. David Alan Gilbert }
3805d3a5038cSDr. David Alan Gilbert 
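/*
 * Illustrative sketch (not part of this file): a postcopy-style discard of
 * a window of a RAMBlock.  start/length are offsets into the block, not
 * guest physical addresses, and must be page-aligned as enforced above;
 * qemu_ram_pagesize() is a real accessor declared in "exec/cpu-common.h".
 */
static int example_discard_window(RAMBlock *rb, uint64_t offset, size_t pages)
{
    size_t page_size = qemu_ram_pagesize(rb);

    return ram_block_discard_range(rb, QEMU_ALIGN_DOWN(offset, page_size),
                                   pages * page_size);
}
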
3806b2e9426cSXiaoyao Li int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
3807b2e9426cSXiaoyao Li                                         size_t length)
3808b2e9426cSXiaoyao Li {
3809b2e9426cSXiaoyao Li     int ret = -1;
3810b2e9426cSXiaoyao Li 
3811b2e9426cSXiaoyao Li #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3812b2e9426cSXiaoyao Li     ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3813b2e9426cSXiaoyao Li                     start, length);
3814b2e9426cSXiaoyao Li 
3815b2e9426cSXiaoyao Li     if (ret) {
3816b2e9426cSXiaoyao Li         ret = -errno;
3817b2e9426cSXiaoyao Li         error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3818b2e9426cSXiaoyao Li                      __func__, rb->idstr, start, length, ret);
3819b2e9426cSXiaoyao Li     }
3820b2e9426cSXiaoyao Li #else
3821b2e9426cSXiaoyao Li     ret = -ENOSYS;
3822b2e9426cSXiaoyao Li     error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
3823b2e9426cSXiaoyao Li                  __func__, rb->idstr, start, length, ret);
3824b2e9426cSXiaoyao Li #endif
3825b2e9426cSXiaoyao Li 
3826b2e9426cSXiaoyao Li     return ret;
3827b2e9426cSXiaoyao Li }
3828b2e9426cSXiaoyao Li 
3829a4de8552SJunyan He bool ramblock_is_pmem(RAMBlock *rb)
3830a4de8552SJunyan He {
3831a4de8552SJunyan He     return rb->flags & RAM_PMEM;
3832a4de8552SJunyan He }
3833a4de8552SJunyan He 
3834b6b71cb5SMarkus Armbruster static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
38355e8fd947SAlexey Kardashevskiy {
38365e8fd947SAlexey Kardashevskiy     if (start == end - 1) {
3837b6b71cb5SMarkus Armbruster         qemu_printf("\t%3d      ", start);
38385e8fd947SAlexey Kardashevskiy     } else {
3839b6b71cb5SMarkus Armbruster         qemu_printf("\t%3d..%-3d ", start, end - 1);
38405e8fd947SAlexey Kardashevskiy     }
3841b6b71cb5SMarkus Armbruster     qemu_printf(" skip=%d ", skip);
38425e8fd947SAlexey Kardashevskiy     if (ptr == PHYS_MAP_NODE_NIL) {
3843b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=NIL");
38445e8fd947SAlexey Kardashevskiy     } else if (!skip) {
3845b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=#%d", ptr);
38465e8fd947SAlexey Kardashevskiy     } else {
3847b6b71cb5SMarkus Armbruster         qemu_printf(" ptr=[%d]", ptr);
38485e8fd947SAlexey Kardashevskiy     }
3849b6b71cb5SMarkus Armbruster     qemu_printf("\n");
38505e8fd947SAlexey Kardashevskiy }
38515e8fd947SAlexey Kardashevskiy 
38525e8fd947SAlexey Kardashevskiy #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
38535e8fd947SAlexey Kardashevskiy                            int128_sub((size), int128_one())) : 0)
38545e8fd947SAlexey Kardashevskiy 
3855b6b71cb5SMarkus Armbruster void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
38565e8fd947SAlexey Kardashevskiy {
38575e8fd947SAlexey Kardashevskiy     int i;
38585e8fd947SAlexey Kardashevskiy 
3859b6b71cb5SMarkus Armbruster     qemu_printf("  Dispatch\n");
3860b6b71cb5SMarkus Armbruster     qemu_printf("    Physical sections\n");
38615e8fd947SAlexey Kardashevskiy 
38625e8fd947SAlexey Kardashevskiy     for (i = 0; i < d->map.sections_nb; ++i) {
38635e8fd947SAlexey Kardashevskiy         MemoryRegionSection *s = d->map.sections + i;
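        /* Tags for the well-known low section indices; 0 is unassigned. */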
38645e8fd947SAlexey Kardashevskiy         static const char *const names[] = { " [unassigned]", " [not dirty]",
38655e8fd947SAlexey Kardashevskiy                                              " [ROM]", " [watch]" };
38665e8fd947SAlexey Kardashevskiy 
3867883f2c59SPhilippe Mathieu-Daudé         qemu_printf("      #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx
3868b6b71cb5SMarkus Armbruster                     " %s%s%s%s%s",
38695e8fd947SAlexey Kardashevskiy             i,
38705e8fd947SAlexey Kardashevskiy             s->offset_within_address_space,
3871f9c307c3SZhenzhong Duan             s->offset_within_address_space + MR_SIZE(s->size),
38725e8fd947SAlexey Kardashevskiy             s->mr->name ? s->mr->name : "(noname)",
38735e8fd947SAlexey Kardashevskiy             i < ARRAY_SIZE(names) ? names[i] : "",
38745e8fd947SAlexey Kardashevskiy             s->mr == root ? " [ROOT]" : "",
38755e8fd947SAlexey Kardashevskiy             s == d->mru_section ? " [MRU]" : "",
38765e8fd947SAlexey Kardashevskiy             s->mr->is_iommu ? " [iommu]" : "");
38775e8fd947SAlexey Kardashevskiy 
38785e8fd947SAlexey Kardashevskiy         if (s->mr->alias) {
3879b6b71cb5SMarkus Armbruster             qemu_printf(" alias=%s", s->mr->alias->name ?
38805e8fd947SAlexey Kardashevskiy                     s->mr->alias->name : "noname");
38815e8fd947SAlexey Kardashevskiy         }
3882b6b71cb5SMarkus Armbruster         qemu_printf("\n");
38835e8fd947SAlexey Kardashevskiy     }
38845e8fd947SAlexey Kardashevskiy 
3885b6b71cb5SMarkus Armbruster     qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
38865e8fd947SAlexey Kardashevskiy                P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
38875e8fd947SAlexey Kardashevskiy     for (i = 0; i < d->map.nodes_nb; ++i) {
38885e8fd947SAlexey Kardashevskiy         int j, jprev;
38895e8fd947SAlexey Kardashevskiy         PhysPageEntry prev;
38905e8fd947SAlexey Kardashevskiy         Node *n = d->map.nodes + i;
38915e8fd947SAlexey Kardashevskiy 
3892b6b71cb5SMarkus Armbruster         qemu_printf("      [%d]\n", i);
38935e8fd947SAlexey Kardashevskiy 
38945e8fd947SAlexey Kardashevskiy         for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
38955e8fd947SAlexey Kardashevskiy             PhysPageEntry *pe = *n + j;
38965e8fd947SAlexey Kardashevskiy 
38975e8fd947SAlexey Kardashevskiy             if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
38985e8fd947SAlexey Kardashevskiy                 continue;
38995e8fd947SAlexey Kardashevskiy             }
39005e8fd947SAlexey Kardashevskiy 
3901b6b71cb5SMarkus Armbruster             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
39025e8fd947SAlexey Kardashevskiy 
39035e8fd947SAlexey Kardashevskiy             jprev = j;
39045e8fd947SAlexey Kardashevskiy             prev = *pe;
39055e8fd947SAlexey Kardashevskiy         }
39065e8fd947SAlexey Kardashevskiy 
39075e8fd947SAlexey Kardashevskiy         if (jprev != ARRAY_SIZE(*n)) {
3908b6b71cb5SMarkus Armbruster             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
39095e8fd947SAlexey Kardashevskiy         }
39105e8fd947SAlexey Kardashevskiy     }
39115e8fd947SAlexey Kardashevskiy }
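/*
 * A sketch of the resulting output, with purely illustrative values
 * (the node line in particular depends on the target configuration):
 *
 *     Dispatch
 *       Physical sections
 *         #0 @0000000000000000..ffffffffffffffff (noname) [unassigned]
 *         #4 @0000000000000000..000000000009ffff pc.ram [MRU]
 *       Nodes (9 bits per level, 6 levels) ptr=[4] skip=1
 *         [0]
 *               0..511   skip=1  ptr=NIL
 */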
39125e8fd947SAlexey Kardashevskiy 
39137e6d32ebSDavid Hildenbrand /* Number of users requiring that any kind of discard keeps working. */
391498da491dSDavid Hildenbrand static unsigned int ram_block_discard_required_cnt;
39157e6d32ebSDavid Hildenbrand /* Number of users requiring that only coordinated discards keep working. */
39167e6d32ebSDavid Hildenbrand static unsigned int ram_block_coordinated_discard_required_cnt;
39177e6d32ebSDavid Hildenbrand /* Number of users that need all discards disabled. */
391898da491dSDavid Hildenbrand static unsigned int ram_block_discard_disabled_cnt;
39197e6d32ebSDavid Hildenbrand /* Number of users that need only uncoordinated discards disabled. */
39207e6d32ebSDavid Hildenbrand static unsigned int ram_block_uncoordinated_discard_disabled_cnt;
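/*
 * The four counters above encode two levels of strictness: requiring any
 * discard conflicts with either disable counter, while requiring only
 * coordinated discards (e.g. through a RamDiscardManager) conflicts just
 * with the full disable.  Symmetrically, disabling all discards conflicts
 * with either requirement, and disabling only uncoordinated discards
 * conflicts just with the full requirement.  All updates happen under the
 * mutex below.
 */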
392198da491dSDavid Hildenbrand static QemuMutex ram_block_discard_disable_mutex;
392298da491dSDavid Hildenbrand 
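/*
 * Initialize the mutex lazily on first use, since a caller may take it
 * before any explicit one-time setup for this file has run.
 */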
392398da491dSDavid Hildenbrand static void ram_block_discard_disable_mutex_lock(void)
392498da491dSDavid Hildenbrand {
392598da491dSDavid Hildenbrand     static gsize initialized;
392698da491dSDavid Hildenbrand 
392798da491dSDavid Hildenbrand     if (g_once_init_enter(&initialized)) {
392898da491dSDavid Hildenbrand         qemu_mutex_init(&ram_block_discard_disable_mutex);
392998da491dSDavid Hildenbrand         g_once_init_leave(&initialized, 1);
393098da491dSDavid Hildenbrand     }
393198da491dSDavid Hildenbrand     qemu_mutex_lock(&ram_block_discard_disable_mutex);
393298da491dSDavid Hildenbrand }
393398da491dSDavid Hildenbrand 
393498da491dSDavid Hildenbrand static void ram_block_discard_disable_mutex_unlock(void)
393598da491dSDavid Hildenbrand {
393698da491dSDavid Hildenbrand     qemu_mutex_unlock(&ram_block_discard_disable_mutex);
393798da491dSDavid Hildenbrand }
3938d24f31dbSDavid Hildenbrand 
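/*
 * Increment (@state true) or decrement (@state false) the number of users
 * that need all discards disabled.  Returns -EBUSY, changing nothing, if
 * another user currently requires discards to work.
 */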
3939d24f31dbSDavid Hildenbrand int ram_block_discard_disable(bool state)
3940d24f31dbSDavid Hildenbrand {
394198da491dSDavid Hildenbrand     int ret = 0;
3942d24f31dbSDavid Hildenbrand 
394398da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
3944d24f31dbSDavid Hildenbrand     if (!state) {
394598da491dSDavid Hildenbrand         ram_block_discard_disabled_cnt--;
39467e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_required_cnt ||
39477e6d32ebSDavid Hildenbrand                ram_block_coordinated_discard_required_cnt) {
394898da491dSDavid Hildenbrand         ret = -EBUSY;
39497e6d32ebSDavid Hildenbrand     } else {
39507e6d32ebSDavid Hildenbrand         ram_block_discard_disabled_cnt++;
39517e6d32ebSDavid Hildenbrand     }
39527e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
39537e6d32ebSDavid Hildenbrand     return ret;
39547e6d32ebSDavid Hildenbrand }
39557e6d32ebSDavid Hildenbrand 
39567e6d32ebSDavid Hildenbrand int ram_block_uncoordinated_discard_disable(bool state)
39577e6d32ebSDavid Hildenbrand {
39587e6d32ebSDavid Hildenbrand     int ret = 0;
39597e6d32ebSDavid Hildenbrand 
39607e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
39617e6d32ebSDavid Hildenbrand     if (!state) {
39627e6d32ebSDavid Hildenbrand         ram_block_uncoordinated_discard_disabled_cnt--;
39637e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_required_cnt) {
39647e6d32ebSDavid Hildenbrand         ret = -EBUSY;
39657e6d32ebSDavid Hildenbrand     } else {
39667e6d32ebSDavid Hildenbrand         ram_block_uncoordinated_discard_disabled_cnt++;
3967d24f31dbSDavid Hildenbrand     }
396898da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
396998da491dSDavid Hildenbrand     return ret;
3970d24f31dbSDavid Hildenbrand }
3971d24f31dbSDavid Hildenbrand 
3972d24f31dbSDavid Hildenbrand int ram_block_discard_require(bool state)
3973d24f31dbSDavid Hildenbrand {
397498da491dSDavid Hildenbrand     int ret = 0;
3975d24f31dbSDavid Hildenbrand 
397698da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
3977d24f31dbSDavid Hildenbrand     if (!state) {
397898da491dSDavid Hildenbrand         ram_block_discard_required_cnt--;
39797e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_disabled_cnt ||
39807e6d32ebSDavid Hildenbrand                ram_block_uncoordinated_discard_disabled_cnt) {
398198da491dSDavid Hildenbrand         ret = -EBUSY;
39827e6d32ebSDavid Hildenbrand     } else {
39837e6d32ebSDavid Hildenbrand         ram_block_discard_required_cnt++;
39847e6d32ebSDavid Hildenbrand     }
39857e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
39867e6d32ebSDavid Hildenbrand     return ret;
39877e6d32ebSDavid Hildenbrand }
39887e6d32ebSDavid Hildenbrand 
39897e6d32ebSDavid Hildenbrand int ram_block_coordinated_discard_require(bool state)
39907e6d32ebSDavid Hildenbrand {
39917e6d32ebSDavid Hildenbrand     int ret = 0;
39927e6d32ebSDavid Hildenbrand 
39937e6d32ebSDavid Hildenbrand     ram_block_discard_disable_mutex_lock();
39947e6d32ebSDavid Hildenbrand     if (!state) {
39957e6d32ebSDavid Hildenbrand         ram_block_coordinated_discard_required_cnt--;
39967e6d32ebSDavid Hildenbrand     } else if (ram_block_discard_disabled_cnt) {
39977e6d32ebSDavid Hildenbrand         ret = -EBUSY;
39987e6d32ebSDavid Hildenbrand     } else {
39997e6d32ebSDavid Hildenbrand         ram_block_coordinated_discard_required_cnt++;
4000d24f31dbSDavid Hildenbrand     }
400198da491dSDavid Hildenbrand     ram_block_discard_disable_mutex_unlock();
400298da491dSDavid Hildenbrand     return ret;
4003d24f31dbSDavid Hildenbrand }
4004d24f31dbSDavid Hildenbrand 
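/*
 * The predicates below read the counters without taking the mutex and
 * thus return an instantaneous snapshot that may race with concurrent
 * require/disable calls.
 */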
4005d24f31dbSDavid Hildenbrand bool ram_block_discard_is_disabled(void)
4006d24f31dbSDavid Hildenbrand {
40077e6d32ebSDavid Hildenbrand     return qatomic_read(&ram_block_discard_disabled_cnt) ||
40087e6d32ebSDavid Hildenbrand            qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt);
4009d24f31dbSDavid Hildenbrand }
4010d24f31dbSDavid Hildenbrand 
4011d24f31dbSDavid Hildenbrand bool ram_block_discard_is_required(void)
4012d24f31dbSDavid Hildenbrand {
40137e6d32ebSDavid Hildenbrand     return qatomic_read(&ram_block_discard_required_cnt) ||
40147e6d32ebSDavid Hildenbrand            qatomic_read(&ram_block_coordinated_discard_required_cnt);
4015d24f31dbSDavid Hildenbrand }
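/*
 * Illustrative pairing of the interfaces above (a sketch, not code from
 * this file): a device that discards RAM only in a coordinated fashion,
 * e.g. through a RamDiscardManager, would register its requirement while
 * realizing and drop it when unrealized:
 *
 *     if (ram_block_coordinated_discard_require(true)) {
 *         error_setg(errp, "RAM discards are currently disabled");
 *         return;
 *     }
 *     ...
 *     ram_block_coordinated_discard_require(false);
 *
 * Code that cannot tolerate uncoordinated discards, for instance because
 * it pins guest pages, would instead bracket its lifetime with
 * ram_block_uncoordinated_discard_disable(true) and a matching call with
 * false.
 */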
4016