xref: /qemu/system/physmem.c (revision 733d05bdc75c10209f72a7f07a602e85d337fd29)
1 /*
2  * RAM allocation and memory access
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "exec/page-vary.h"
22 #include "qapi/error.h"
23 
24 #include "qemu/cutils.h"
25 #include "qemu/cacheflush.h"
26 #include "qemu/hbitmap.h"
27 #include "qemu/madvise.h"
28 #include "qemu/lockable.h"
29 
30 #ifdef CONFIG_TCG
31 #include "hw/core/tcg-cpu-ops.h"
32 #endif /* CONFIG_TCG */
33 
34 #include "exec/exec-all.h"
35 #include "exec/page-protection.h"
36 #include "exec/target_page.h"
37 #include "hw/qdev-core.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/boards.h"
40 #include "system/xen.h"
41 #include "system/kvm.h"
42 #include "system/tcg.h"
43 #include "system/qtest.h"
44 #include "qemu/timer.h"
45 #include "qemu/config-file.h"
46 #include "qemu/error-report.h"
47 #include "qemu/qemu-print.h"
48 #include "qemu/log.h"
49 #include "qemu/memalign.h"
50 #include "exec/memory.h"
51 #include "exec/ioport.h"
52 #include "system/dma.h"
53 #include "system/hostmem.h"
54 #include "system/hw_accel.h"
55 #include "system/xen-mapcache.h"
56 #include "trace.h"
57 
58 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
59 #include <linux/falloc.h>
60 #endif
61 
62 #include "qemu/rcu_queue.h"
63 #include "qemu/main-loop.h"
64 #include "system/replay.h"
65 
66 #include "exec/memory-internal.h"
67 #include "exec/ram_addr.h"
68 
69 #include "qemu/pmem.h"
70 
71 #include "migration/vmstate.h"
72 
73 #include "qemu/range.h"
74 #ifndef _WIN32
75 #include "qemu/mmap-alloc.h"
76 #endif
77 
78 #include "monitor/monitor.h"
79 
80 #ifdef CONFIG_LIBDAXCTL
81 #include <daxctl/libdaxctl.h>
82 #endif
83 
84 //#define DEBUG_SUBPAGE
85 
86 /* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
87  * are protected by the ramlist lock.
88  */
89 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
90 
91 static MemoryRegion *system_memory;
92 static MemoryRegion *system_io;
93 
94 AddressSpace address_space_io;
95 AddressSpace address_space_memory;
96 
97 static MemoryRegion io_mem_unassigned;
98 
99 typedef struct PhysPageEntry PhysPageEntry;
100 
101 struct PhysPageEntry {
102     /* Number of bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
103     uint32_t skip : 6;
104     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
105     uint32_t ptr : 26;
106 };
107 
108 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
109 
110 /* Size of the L2 (and L3, etc) page tables.  */
111 #define ADDR_SPACE_BITS 64
112 
113 #define P_L2_BITS 9
114 #define P_L2_SIZE (1 << P_L2_BITS)
115 
116 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
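/*
 * For example, with 4 KiB target pages (TARGET_PAGE_BITS == 12) this works
 * out to ((64 - 12 - 1) / 9) + 1 = 6 levels, each level resolving
 * P_L2_BITS == 9 bits of the page number.  With so few levels, the 6-bit
 * 'skip' field of PhysPageEntry easily encodes any chain of levels merged
 * by phys_page_compact() below.
 */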
117 
118 typedef PhysPageEntry Node[P_L2_SIZE];
119 
120 typedef struct PhysPageMap {
121     struct rcu_head rcu;
122 
123     unsigned sections_nb;
124     unsigned sections_nb_alloc;
125     unsigned nodes_nb;
126     unsigned nodes_nb_alloc;
127     Node *nodes;
128     MemoryRegionSection *sections;
129 } PhysPageMap;
130 
131 struct AddressSpaceDispatch {
132     MemoryRegionSection *mru_section;
133     /* This is a multi-level map on the physical address space.
134      * The bottom level has pointers to MemoryRegionSections.
135      */
136     PhysPageEntry phys_map;
137     PhysPageMap map;
138 };
139 
140 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
141 typedef struct subpage_t {
142     MemoryRegion iomem;
143     FlatView *fv;
144     hwaddr base;
145     uint16_t sub_section[];
146 } subpage_t;
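/*
 * A subpage_t models one target page whose contents are split between
 * several MemoryRegionSections.  sub_section[] holds one section index per
 * byte offset within the page (indexed by SUBPAGE_IDX()), which lets
 * address_space_lookup_region() resolve mappings at sub-page granularity.
 */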
147 
148 #define PHYS_SECTION_UNASSIGNED 0
149 
150 static void io_mem_init(void);
151 static void memory_map_init(void);
152 static void tcg_log_global_after_sync(MemoryListener *listener);
153 static void tcg_commit(MemoryListener *listener);
154 
155 /**
156  * CPUAddressSpace: all the information a CPU needs about an AddressSpace
157  * @cpu: the CPU whose AddressSpace this is
158  * @as: the AddressSpace itself
159  * @memory_dispatch: its dispatch pointer (cached, RCU protected)
160  * @tcg_as_listener: listener for tracking changes to the AddressSpace
161  */
162 typedef struct CPUAddressSpace {
163     CPUState *cpu;
164     AddressSpace *as;
165     struct AddressSpaceDispatch *memory_dispatch;
166     MemoryListener tcg_as_listener;
167 } CPUAddressSpace;
168 
169 struct DirtyBitmapSnapshot {
170     ram_addr_t start;
171     ram_addr_t end;
172     unsigned long dirty[];
173 };
174 
175 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
176 {
177     static unsigned alloc_hint = 16;
178     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
179         map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
180         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
181         alloc_hint = map->nodes_nb_alloc;
182     }
183 }
184 
185 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
186 {
187     unsigned i;
188     uint32_t ret;
189     PhysPageEntry e;
190     PhysPageEntry *p;
191 
192     ret = map->nodes_nb++;
193     p = map->nodes[ret];
194     assert(ret != PHYS_MAP_NODE_NIL);
195     assert(ret != map->nodes_nb_alloc);
196 
197     e.skip = leaf ? 0 : 1;
198     e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
199     for (i = 0; i < P_L2_SIZE; ++i) {
200         memcpy(&p[i], &e, sizeof(e));
201     }
202     return ret;
203 }
204 
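/*
 * Recursively populate the radix tree: at each level the index is split
 * into P_L2_BITS-wide chunks.  Ranges that are aligned to, and at least as
 * large as, this level's 'step' get the leaf section pointer stored
 * directly; anything smaller descends one more level, allocating
 * intermediate nodes on demand.
 */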
205 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
206                                 hwaddr *index, uint64_t *nb, uint16_t leaf,
207                                 int level)
208 {
209     PhysPageEntry *p;
210     hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
211 
212     if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
213         lp->ptr = phys_map_node_alloc(map, level == 0);
214     }
215     p = map->nodes[lp->ptr];
216     lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
217 
218     while (*nb && lp < &p[P_L2_SIZE]) {
219         if ((*index & (step - 1)) == 0 && *nb >= step) {
220             lp->skip = 0;
221             lp->ptr = leaf;
222             *index += step;
223             *nb -= step;
224         } else {
225             phys_page_set_level(map, lp, index, nb, leaf, level - 1);
226         }
227         ++lp;
228     }
229 }
230 
231 static void phys_page_set(AddressSpaceDispatch *d,
232                           hwaddr index, uint64_t nb,
233                           uint16_t leaf)
234 {
235     /* Wildly overreserve - it doesn't matter much. */
236     phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
237 
238     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
239 }
240 
241 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
242  * and update our entry so we can skip it and go directly to the destination.
243  */
244 static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
245 {
246     unsigned valid_ptr = P_L2_SIZE;
247     int valid = 0;
248     PhysPageEntry *p;
249     int i;
250 
251     if (lp->ptr == PHYS_MAP_NODE_NIL) {
252         return;
253     }
254 
255     p = nodes[lp->ptr];
256     for (i = 0; i < P_L2_SIZE; i++) {
257         if (p[i].ptr == PHYS_MAP_NODE_NIL) {
258             continue;
259         }
260 
261         valid_ptr = i;
262         valid++;
263         if (p[i].skip) {
264             phys_page_compact(&p[i], nodes);
265         }
266     }
267 
268     /* We can only compress if there's only one child. */
269     if (valid != 1) {
270         return;
271     }
272 
273     assert(valid_ptr < P_L2_SIZE);
274 
275     /* Don't compress if it won't fit in the # of bits we have. */
276     if (P_L2_LEVELS >= (1 << 6) &&
277         lp->skip + p[valid_ptr].skip >= (1 << 6)) {
278         return;
279     }
280 
281     lp->ptr = p[valid_ptr].ptr;
282     if (!p[valid_ptr].skip) {
283         /* If our only child is a leaf, make this a leaf. */
284         /* By design, we should have made this node a leaf to begin with so we
285          * should never reach here.
286          * But since it's so simple to handle this, let's do it just in case we
287          * change this rule.
288          */
289         lp->skip = 0;
290     } else {
291         lp->skip += p[valid_ptr].skip;
292     }
293 }
294 
295 void address_space_dispatch_compact(AddressSpaceDispatch *d)
296 {
297     if (d->phys_map.skip) {
298         phys_page_compact(&d->phys_map, d->map.nodes);
299     }
300 }
301 
302 static inline bool section_covers_addr(const MemoryRegionSection *section,
303                                        hwaddr addr)
304 {
305     /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
306      * the section must cover the entire address space.
307      */
308     return int128_gethi(section->size) ||
309            range_covers_byte(section->offset_within_address_space,
310                              int128_getlo(section->size), addr);
311 }
312 
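/*
 * Walk the radix tree from the root, consuming lp.skip levels per step
 * (compacted chains advance more than one level at a time).  A NIL pointer
 * means no section was registered for this page, so the unassigned section
 * is returned; otherwise the final ptr indexes d->map.sections[].
 */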
313 static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
314 {
315     PhysPageEntry lp = d->phys_map, *p;
316     Node *nodes = d->map.nodes;
317     MemoryRegionSection *sections = d->map.sections;
318     hwaddr index = addr >> TARGET_PAGE_BITS;
319     int i;
320 
321     for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
322         if (lp.ptr == PHYS_MAP_NODE_NIL) {
323             return &sections[PHYS_SECTION_UNASSIGNED];
324         }
325         p = nodes[lp.ptr];
326         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
327     }
328 
329     if (section_covers_addr(&sections[lp.ptr], addr)) {
330         return &sections[lp.ptr];
331     } else {
332         return &sections[PHYS_SECTION_UNASSIGNED];
333     }
334 }
335 
336 /* Called from RCU critical section */
337 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
338                                                         hwaddr addr,
339                                                         bool resolve_subpage)
340 {
341     MemoryRegionSection *section = qatomic_read(&d->mru_section);
342     subpage_t *subpage;
343 
344     if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
345         !section_covers_addr(section, addr)) {
346         section = phys_page_find(d, addr);
347         qatomic_set(&d->mru_section, section);
348     }
349     if (resolve_subpage && section->mr->subpage) {
350         subpage = container_of(section->mr, subpage_t, iomem);
351         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
352     }
353     return section;
354 }
355 
356 /* Called from RCU critical section */
357 static MemoryRegionSection *
358 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
359                                  hwaddr *plen, bool resolve_subpage)
360 {
361     MemoryRegionSection *section;
362     MemoryRegion *mr;
363     Int128 diff;
364 
365     section = address_space_lookup_region(d, addr, resolve_subpage);
366     /* Compute offset within MemoryRegionSection */
367     addr -= section->offset_within_address_space;
368 
369     /* Compute offset within MemoryRegion */
370     *xlat = addr + section->offset_within_region;
371 
372     mr = section->mr;
373 
374     /* MMIO registers can be expected to perform full-width accesses based only
375      * on their address, without considering adjacent registers that could
376      * decode to completely different MemoryRegions.  When such registers
377      * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
378      * regions overlap wildly.  For this reason we cannot clamp the accesses
379      * here.
380      *
381      * If the length is small (as is the case for address_space_ldl/stl),
382      * everything works fine.  If the incoming length is large, however,
383      * the caller really has to do the clamping through memory_access_size.
384      */
385     if (memory_region_is_ram(mr)) {
386         diff = int128_sub(section->size, int128_make64(addr));
387         *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
388     }
389     return section;
390 }
391 
392 /**
393  * address_space_translate_iommu - translate an address through an IOMMU
394  * memory region and then through the target address space.
395  *
396  * @iommu_mr: the IOMMU memory region that we start the translation from
397  * @addr: the address to be translated through the MMU
398  * @xlat: the translated address offset within the destination memory region.
399  *        It cannot be %NULL.
400  * @plen_out: valid read/write length of the translated address. It
401  *            cannot be %NULL.
402  * @page_mask_out: page mask for the translated address. This is only
403  *            meaningful for IOMMU-translated addresses, since the IOMMU
404  *            may map huge pages whose size this mask reports. It can be
405  *            %NULL if we don't care about it.
406  * @is_write: whether the translation operation is for a write
407  * @is_mmio: whether this access can be MMIO; set true if it can
408  * @target_as: the address space targeted by the IOMMU
409  * @attrs: transaction attributes
410  *
411  * This function is called from RCU critical section.  It is the common
412  * part of flatview_do_translate and address_space_translate_cached.
413  */
414 static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
415                                                          hwaddr *xlat,
416                                                          hwaddr *plen_out,
417                                                          hwaddr *page_mask_out,
418                                                          bool is_write,
419                                                          bool is_mmio,
420                                                          AddressSpace **target_as,
421                                                          MemTxAttrs attrs)
422 {
423     MemoryRegionSection *section;
424     hwaddr page_mask = (hwaddr)-1;
425 
426     do {
427         hwaddr addr = *xlat;
428         IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
429         int iommu_idx = 0;
430         IOMMUTLBEntry iotlb;
431 
432         if (imrc->attrs_to_index) {
433             iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
434         }
435 
436         iotlb = imrc->translate(iommu_mr, addr, is_write ?
437                                 IOMMU_WO : IOMMU_RO, iommu_idx);
438 
439         if (!(iotlb.perm & (1 << is_write))) {
440             goto unassigned;
441         }
442 
443         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
444                 | (addr & iotlb.addr_mask));
445         page_mask &= iotlb.addr_mask;
446         *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
447         *target_as = iotlb.target_as;
448 
449         section = address_space_translate_internal(
450                 address_space_to_dispatch(iotlb.target_as), addr, xlat,
451                 plen_out, is_mmio);
452 
453         iommu_mr = memory_region_get_iommu(section->mr);
454     } while (unlikely(iommu_mr));
455 
456     if (page_mask_out) {
457         *page_mask_out = page_mask;
458     }
459     return *section;
460 
461 unassigned:
462     return (MemoryRegionSection) { .mr = &io_mem_unassigned };
463 }
464 
465 /**
466  * flatview_do_translate - translate an address in FlatView
467  *
468  * @fv: the flat view that we want to translate on
469  * @addr: the address to be translated in the above address space
470  * @xlat: the translated address offset within the memory region. It
471  *        cannot be %NULL.
472  * @plen_out: valid read/write length of the translated address. It
473  *            can be %NULL when we don't care about it.
474  * @page_mask_out: page mask for the translated address. This is only
475  *            meaningful for IOMMU-translated addresses, since the IOMMU
476  *            may map huge pages whose size this mask reports. It can be
477  *            %NULL if we don't care about it.
478  * @is_write: whether the translation operation is for a write
479  * @is_mmio: whether this access can be MMIO; set true if it can
480  * @target_as: the address space targeted by the IOMMU
481  * @attrs: memory transaction attributes
482  *
483  * This function is called from RCU critical section
484  * This function is called from RCU critical section.
485 static MemoryRegionSection flatview_do_translate(FlatView *fv,
486                                                  hwaddr addr,
487                                                  hwaddr *xlat,
488                                                  hwaddr *plen_out,
489                                                  hwaddr *page_mask_out,
490                                                  bool is_write,
491                                                  bool is_mmio,
492                                                  AddressSpace **target_as,
493                                                  MemTxAttrs attrs)
494 {
495     MemoryRegionSection *section;
496     IOMMUMemoryRegion *iommu_mr;
497     hwaddr plen = (hwaddr)(-1);
498 
499     if (!plen_out) {
500         plen_out = &plen;
501     }
502 
503     section = address_space_translate_internal(
504             flatview_to_dispatch(fv), addr, xlat,
505             plen_out, is_mmio);
506 
507     iommu_mr = memory_region_get_iommu(section->mr);
508     if (unlikely(iommu_mr)) {
509         return address_space_translate_iommu(iommu_mr, xlat,
510                                              plen_out, page_mask_out,
511                                              is_write, is_mmio,
512                                              target_as, attrs);
513     }
514     if (page_mask_out) {
515         /* Not behind an IOMMU, use default page size. */
516         *page_mask_out = ~TARGET_PAGE_MASK;
517     }
518 
519     return *section;
520 }
521 
522 /* Called from RCU critical section */
523 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
524                                             bool is_write, MemTxAttrs attrs)
525 {
526     MemoryRegionSection section;
527     hwaddr xlat, page_mask;
528 
529     /*
530      * This can never be MMIO; we don't really care about plen,
531      * only the page mask.
532      */
533     section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
534                                     NULL, &page_mask, is_write, false, &as,
535                                     attrs);
536 
537     /* Illegal translation */
538     if (section.mr == &io_mem_unassigned) {
539         goto iotlb_fail;
540     }
541 
542     /* Convert memory region offset into address space offset */
543     xlat += section.offset_within_address_space -
544         section.offset_within_region;
545 
546     return (IOMMUTLBEntry) {
547         .target_as = as,
548         .iova = addr & ~page_mask,
549         .translated_addr = xlat & ~page_mask,
550         .addr_mask = page_mask,
551         /* IOTLBs are for DMA, and DMA is only allowed to RAM. */
552         .perm = IOMMU_RW,
553     };
554 
555 iotlb_fail:
556     return (IOMMUTLBEntry) {0};
557 }
558 
559 /* Called from RCU critical section */
560 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
561                                  hwaddr *plen, bool is_write,
562                                  MemTxAttrs attrs)
563 {
564     MemoryRegion *mr;
565     MemoryRegionSection section;
566     AddressSpace *as = NULL;
567 
568     /* This can be MMIO, so set up the MMIO bit. */
569     section = flatview_do_translate(fv, addr, xlat, plen, NULL,
570                                     is_write, true, &as, attrs);
571     mr = section.mr;
572 
573     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
574         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
575         *plen = MIN(page, *plen);
576     }
577 
578     return mr;
579 }
580 
581 typedef struct TCGIOMMUNotifier {
582     IOMMUNotifier n;
583     MemoryRegion *mr;
584     CPUState *cpu;
585     int iommu_idx;
586     bool active;
587 } TCGIOMMUNotifier;
588 
589 static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
590 {
591     TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
592 
593     if (!notifier->active) {
594         return;
595     }
596     tlb_flush(notifier->cpu);
597     notifier->active = false;
598     /* We leave the notifier struct on the list to avoid reallocating it later.
599      * Generally the number of IOMMUs a CPU deals with will be small.
600      * In any case we can't unregister the iommu notifier from a notify
601      * callback.
602      */
603 }
604 
605 static void tcg_register_iommu_notifier(CPUState *cpu,
606                                         IOMMUMemoryRegion *iommu_mr,
607                                         int iommu_idx)
608 {
609     /* Make sure this CPU has an IOMMU notifier registered for this
610      * IOMMU/IOMMU index combination, so that we can flush its TLB
611      * when the IOMMU tells us the mappings we've cached have changed.
612      */
613     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
614     TCGIOMMUNotifier *notifier = NULL;
615     int i;
616 
617     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
618         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
619         if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
620             break;
621         }
622     }
623     if (i == cpu->iommu_notifiers->len) {
624         /* Not found, add a new entry at the end of the array */
625         cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
626         notifier = g_new0(TCGIOMMUNotifier, 1);
627         g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;
628 
629         notifier->mr = mr;
630         notifier->iommu_idx = iommu_idx;
631         notifier->cpu = cpu;
632         /* Rather than trying to register interest in the specific part
633          * of the iommu's address space that we've accessed and then
634          * expand it later as subsequent accesses touch more of it, we
635          * just register interest in the whole thing, on the assumption
636          * that iommu reconfiguration will be rare.
637          */
638         iommu_notifier_init(&notifier->n,
639                             tcg_iommu_unmap_notify,
640                             IOMMU_NOTIFIER_UNMAP,
641                             0,
642                             HWADDR_MAX,
643                             iommu_idx);
644         memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
645                                               &error_fatal);
646     }
647 
648     if (!notifier->active) {
649         notifier->active = true;
650     }
651 }
652 
653 void tcg_iommu_free_notifier_list(CPUState *cpu)
654 {
655     /* Destroy the CPU's notifier list */
656     int i;
657     TCGIOMMUNotifier *notifier;
658 
659     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
660         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
661         memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
662         g_free(notifier);
663     }
664     g_array_free(cpu->iommu_notifiers, true);
665 }
666 
667 void tcg_iommu_init_notifier_list(CPUState *cpu)
668 {
669     cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
670 }
671 
672 /* Called from RCU critical section */
673 MemoryRegionSection *
674 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
675                                   hwaddr *xlat, hwaddr *plen,
676                                   MemTxAttrs attrs, int *prot)
677 {
678     MemoryRegionSection *section;
679     IOMMUMemoryRegion *iommu_mr;
680     IOMMUMemoryRegionClass *imrc;
681     IOMMUTLBEntry iotlb;
682     int iommu_idx;
683     hwaddr addr = orig_addr;
684     AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
685 
686     for (;;) {
687         section = address_space_translate_internal(d, addr, &addr, plen, false);
688 
689         iommu_mr = memory_region_get_iommu(section->mr);
690         if (!iommu_mr) {
691             break;
692         }
693 
694         imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
695 
696         iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
697         tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
698         /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
699          * doesn't short-cut its translation table walk.
700          */
701         iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
702         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
703                 | (addr & iotlb.addr_mask));
704         /* Update the caller's prot bits to remove permissions the IOMMU
705          * is giving us a failure response for. If we get down to no
706          * permissions left at all we can give up now.
707          */
708         if (!(iotlb.perm & IOMMU_RO)) {
709             *prot &= ~(PAGE_READ | PAGE_EXEC);
710         }
711         if (!(iotlb.perm & IOMMU_WO)) {
712             *prot &= ~PAGE_WRITE;
713         }
714 
715         if (!*prot) {
716             goto translate_fail;
717         }
718 
719         d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
720     }
721 
722     assert(!memory_region_is_iommu(section->mr));
723     *xlat = addr;
724     return section;
725 
726 translate_fail:
727     /*
728      * We should be given a page-aligned address -- certainly
729      * tlb_set_page_with_attrs() does so.  The page offset of xlat
730      * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
731      * The page portion of xlat will be logged by memory_region_access_valid()
732      * when this memory access is rejected, so use the original untranslated
733      * physical address.
734      */
735     assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
736     *xlat = orig_addr;
737     return &d->map.sections[PHYS_SECTION_UNASSIGNED];
738 }
739 
740 void cpu_address_space_init(CPUState *cpu, int asidx,
741                             const char *prefix, MemoryRegion *mr)
742 {
743     CPUAddressSpace *newas;
744     AddressSpace *as = g_new0(AddressSpace, 1);
745     char *as_name;
746 
747     assert(mr);
748     as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
749     address_space_init(as, mr, as_name);
750     g_free(as_name);
751 
752     /* Target code should have set num_ases before calling us */
753     assert(asidx < cpu->num_ases);
754 
755     if (asidx == 0) {
756         /* address space 0 gets the convenience alias */
757         cpu->as = as;
758     }
759 
760     /* KVM cannot currently support multiple address spaces. */
761     assert(asidx == 0 || !kvm_enabled());
762 
763     if (!cpu->cpu_ases) {
764         cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
765         cpu->cpu_ases_count = cpu->num_ases;
766     }
767 
768     newas = &cpu->cpu_ases[asidx];
769     newas->cpu = cpu;
770     newas->as = as;
771     if (tcg_enabled()) {
772         newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
773         newas->tcg_as_listener.commit = tcg_commit;
774         newas->tcg_as_listener.name = "tcg";
775         memory_listener_register(&newas->tcg_as_listener, as);
776     }
777 }
778 
779 void cpu_address_space_destroy(CPUState *cpu, int asidx)
780 {
781     CPUAddressSpace *cpuas;
782 
783     assert(cpu->cpu_ases);
784     assert(asidx >= 0 && asidx < cpu->num_ases);
785     /* KVM cannot currently support multiple address spaces. */
786     assert(asidx == 0 || !kvm_enabled());
787 
788     cpuas = &cpu->cpu_ases[asidx];
789     if (tcg_enabled()) {
790         memory_listener_unregister(&cpuas->tcg_as_listener);
791     }
792 
793     address_space_destroy(cpuas->as);
794     g_free_rcu(cpuas->as, rcu);
795 
796     if (asidx == 0) {
797         /* reset the convenience alias for address space 0 */
798         cpu->as = NULL;
799     }
800 
801     if (--cpu->cpu_ases_count == 0) {
802         g_free(cpu->cpu_ases);
803         cpu->cpu_ases = NULL;
804     }
805 }
806 
807 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
808 {
809     /* Return the AddressSpace corresponding to the specified index */
810     return cpu->cpu_ases[asidx].as;
811 }
812 
813 /* Called from RCU critical section */
814 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
815 {
816     RAMBlock *block;
817 
818     block = qatomic_rcu_read(&ram_list.mru_block);
819     if (block && addr - block->offset < block->max_length) {
820         return block;
821     }
822     RAMBLOCK_FOREACH(block) {
823         if (addr - block->offset < block->max_length) {
824             goto found;
825         }
826     }
827 
828     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
829     abort();
830 
831 found:
832     /* It is safe to write mru_block outside the BQL.  This
833      * is what happens:
834      *
835      *     mru_block = xxx
836      *     rcu_read_unlock()
837      *                                        xxx removed from list
838      *                  rcu_read_lock()
839      *                  read mru_block
840      *                                        mru_block = NULL;
841      *                                        call_rcu(reclaim_ramblock, xxx);
842      *                  rcu_read_unlock()
843      *
844      * qatomic_rcu_set is not needed here.  The block was already published
845      * when it was placed into the list.  Here we're just making an extra
846      * copy of the pointer.
847      */
848     ram_list.mru_block = block;
849     return block;
850 }
851 
852 void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
853 {
854     CPUState *cpu;
855     ram_addr_t start1;
856     RAMBlock *block;
857     ram_addr_t end;
858 
859     assert(tcg_enabled());
860     end = TARGET_PAGE_ALIGN(start + length);
861     start &= TARGET_PAGE_MASK;
862 
863     RCU_READ_LOCK_GUARD();
864     block = qemu_get_ram_block(start);
865     assert(block == qemu_get_ram_block(end - 1));
866     start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
867     CPU_FOREACH(cpu) {
868         tlb_reset_dirty(cpu, start1, length);
869     }
870 }
871 
872 /* Note: start and end must be within the same ram block.  */
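/* The per-client dirty bitmap is sharded into DirtyMemoryBlocks covering
 * DIRTY_MEMORY_BLOCK_SIZE pages each; the array of shards is read under
 * RCU while the bits themselves are cleared atomically, so this can run
 * concurrently with writers setting dirty bits.
 */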
873 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
874                                               ram_addr_t length,
875                                               unsigned client)
876 {
877     DirtyMemoryBlocks *blocks;
878     unsigned long end, page, start_page;
879     bool dirty = false;
880     RAMBlock *ramblock;
881     uint64_t mr_offset, mr_size;
882 
883     if (length == 0) {
884         return false;
885     }
886 
887     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
888     start_page = start >> TARGET_PAGE_BITS;
889     page = start_page;
890 
891     WITH_RCU_READ_LOCK_GUARD() {
892         blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
893         ramblock = qemu_get_ram_block(start);
894         /* Range sanity check on the ramblock */
895         assert(start >= ramblock->offset &&
896                start + length <= ramblock->offset + ramblock->used_length);
897 
898         while (page < end) {
899             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
900             unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
901             unsigned long num = MIN(end - page,
902                                     DIRTY_MEMORY_BLOCK_SIZE - offset);
903 
904             dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
905                                                   offset, num);
906             page += num;
907         }
908 
909         mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
910         mr_size = (end - start_page) << TARGET_PAGE_BITS;
911         memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
912     }
913 
914     if (dirty) {
915         cpu_physical_memory_dirty_bits_cleared(start, length);
916     }
917 
918     return dirty;
919 }
920 
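/*
 * Snapshot boundaries are rounded out to 1UL << (TARGET_PAGE_BITS +
 * BITS_PER_LEVEL), i.e. to a whole 'unsigned long' worth of dirty bits
 * (BITS_PER_LEVEL is log2(BITS_PER_LONG), see qemu/hbitmap.h), so the loop
 * below can move aligned words with bitmap_copy_and_clear_atomic() instead
 * of individual bits.
 */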
921 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
922     (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
923 {
924     DirtyMemoryBlocks *blocks;
925     ram_addr_t start, first, last;
926     unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
927     DirtyBitmapSnapshot *snap;
928     unsigned long page, end, dest;
929 
930     start = memory_region_get_ram_addr(mr);
931     /* We know we're only called for RAM MemoryRegions */
932     assert(start != RAM_ADDR_INVALID);
933     start += offset;
934 
935     first = QEMU_ALIGN_DOWN(start, align);
936     last  = QEMU_ALIGN_UP(start + length, align);
937 
938     snap = g_malloc0(sizeof(*snap) +
939                      ((last - first) >> (TARGET_PAGE_BITS + 3)));
940     snap->start = first;
941     snap->end   = last;
942 
943     page = first >> TARGET_PAGE_BITS;
944     end  = last  >> TARGET_PAGE_BITS;
945     dest = 0;
946 
947     WITH_RCU_READ_LOCK_GUARD() {
948         blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
949 
950         while (page < end) {
951             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
952             unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
953             unsigned long num = MIN(end - page,
954                                     DIRTY_MEMORY_BLOCK_SIZE - ofs);
955 
956             assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
957             assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
958             ofs >>= BITS_PER_LEVEL;
959 
960             bitmap_copy_and_clear_atomic(snap->dirty + dest,
961                                          blocks->blocks[idx] + ofs,
962                                          num);
963             page += num;
964             dest += num >> BITS_PER_LEVEL;
965         }
966     }
967 
968     cpu_physical_memory_dirty_bits_cleared(start, length);
969 
970     memory_region_clear_dirty_bitmap(mr, offset, length);
971 
972     return snap;
973 }
974 
975 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
976                                             ram_addr_t start,
977                                             ram_addr_t length)
978 {
979     unsigned long page, end;
980 
981     assert(start >= snap->start);
982     assert(start + length <= snap->end);
983 
984     end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
985     page = (start - snap->start) >> TARGET_PAGE_BITS;
986 
987     while (page < end) {
988         if (test_bit(page, snap->dirty)) {
989             return true;
990         }
991         page++;
992     }
993     return false;
994 }
995 
996 /* Called from RCU critical section */
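/*
 * The IOTLB value for a section is simply its index in the dispatch's
 * sections[] array; phys_section_add() guarantees this index stays below
 * TARGET_PAGE_SIZE, so it can later be combined with a page-aligned address
 * without overflowing into it.
 */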
997 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
998                                        MemoryRegionSection *section)
999 {
1000     AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
1001     return section - d->map.sections;
1002 }
1003 
1004 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
1005                             uint16_t section);
1006 static subpage_t *subpage_init(FlatView *fv, hwaddr base);
1007 
1008 static uint16_t phys_section_add(PhysPageMap *map,
1009                                  MemoryRegionSection *section)
1010 {
1011     /* The physical section number is ORed with a page-aligned
1012      * pointer to produce the iotlb entries.  Thus it should
1013      * never overflow into the page-aligned value.
1014      */
1015     assert(map->sections_nb < TARGET_PAGE_SIZE);
1016 
1017     if (map->sections_nb == map->sections_nb_alloc) {
1018         map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1019         map->sections = g_renew(MemoryRegionSection, map->sections,
1020                                 map->sections_nb_alloc);
1021     }
1022     map->sections[map->sections_nb] = *section;
1023     memory_region_ref(section->mr);
1024     return map->sections_nb++;
1025 }
1026 
1027 static void phys_section_destroy(MemoryRegion *mr)
1028 {
1029     bool have_sub_page = mr->subpage;
1030 
1031     memory_region_unref(mr);
1032 
1033     if (have_sub_page) {
1034         subpage_t *subpage = container_of(mr, subpage_t, iomem);
1035         object_unref(OBJECT(&subpage->iomem));
1036         g_free(subpage);
1037     }
1038 }
1039 
1040 static void phys_sections_free(PhysPageMap *map)
1041 {
1042     while (map->sections_nb > 0) {
1043         MemoryRegionSection *section = &map->sections[--map->sections_nb];
1044         phys_section_destroy(section->mr);
1045     }
1046     g_free(map->sections);
1047     g_free(map->nodes);
1048 }
1049 
1050 static void register_subpage(FlatView *fv, MemoryRegionSection *section)
1051 {
1052     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1053     subpage_t *subpage;
1054     hwaddr base = section->offset_within_address_space
1055         & TARGET_PAGE_MASK;
1056     MemoryRegionSection *existing = phys_page_find(d, base);
1057     MemoryRegionSection subsection = {
1058         .offset_within_address_space = base,
1059         .size = int128_make64(TARGET_PAGE_SIZE),
1060     };
1061     hwaddr start, end;
1062 
1063     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1064 
1065     if (!(existing->mr->subpage)) {
1066         subpage = subpage_init(fv, base);
1067         subsection.fv = fv;
1068         subsection.mr = &subpage->iomem;
1069         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1070                       phys_section_add(&d->map, &subsection));
1071     } else {
1072         subpage = container_of(existing->mr, subpage_t, iomem);
1073     }
1074     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1075     end = start + int128_get64(section->size) - 1;
1076     subpage_register(subpage, start, end,
1077                      phys_section_add(&d->map, section));
1078 }
1079 
1080 
1081 static void register_multipage(FlatView *fv,
1082                                MemoryRegionSection *section)
1083 {
1084     AddressSpaceDispatch *d = flatview_to_dispatch(fv);
1085     hwaddr start_addr = section->offset_within_address_space;
1086     uint16_t section_index = phys_section_add(&d->map, section);
1087     uint64_t num_pages = int128_get64(int128_rshift(section->size,
1088                                                     TARGET_PAGE_BITS));
1089 
1090     assert(num_pages);
1091     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1092 }
1093 
1094 /*
1095  * The range in *section* may look like this:
1096  *
1097  *      |s|PPPPPPP|s|
1098  *
1099  * where s stands for subpage and P for page.
1100  */
1101 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
1102 {
1103     MemoryRegionSection remain = *section;
1104     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1105 
1106     /* register first subpage */
1107     if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1108         uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
1109                         - remain.offset_within_address_space;
1110 
1111         MemoryRegionSection now = remain;
1112         now.size = int128_min(int128_make64(left), now.size);
1113         register_subpage(fv, &now);
1114         if (int128_eq(remain.size, now.size)) {
1115             return;
1116         }
1117         remain.size = int128_sub(remain.size, now.size);
1118         remain.offset_within_address_space += int128_get64(now.size);
1119         remain.offset_within_region += int128_get64(now.size);
1120     }
1121 
1122     /* register whole pages */
1123     if (int128_ge(remain.size, page_size)) {
1124         MemoryRegionSection now = remain;
1125         now.size = int128_and(now.size, int128_neg(page_size));
1126         register_multipage(fv, &now);
1127         if (int128_eq(remain.size, now.size)) {
1128             return;
1129         }
1130         remain.size = int128_sub(remain.size, now.size);
1131         remain.offset_within_address_space += int128_get64(now.size);
1132         remain.offset_within_region += int128_get64(now.size);
1133     }
1134 
1135     /* register last subpage */
1136     register_subpage(fv, &remain);
1137 }
1138 
1139 void qemu_flush_coalesced_mmio_buffer(void)
1140 {
1141     if (kvm_enabled())
1142         kvm_flush_coalesced_mmio_buffer();
1143 }
1144 
1145 void qemu_mutex_lock_ramlist(void)
1146 {
1147     qemu_mutex_lock(&ram_list.mutex);
1148 }
1149 
1150 void qemu_mutex_unlock_ramlist(void)
1151 {
1152     qemu_mutex_unlock(&ram_list.mutex);
1153 }
1154 
1155 GString *ram_block_format(void)
1156 {
1157     RAMBlock *block;
1158     char *psize;
1159     GString *buf = g_string_new("");
1160 
1161     RCU_READ_LOCK_GUARD();
1162     g_string_append_printf(buf, "%24s %8s  %18s %18s %18s %18s %3s\n",
1163                            "Block Name", "PSize", "Offset", "Used", "Total",
1164                            "HVA", "RO");
1165 
1166     RAMBLOCK_FOREACH(block) {
1167         psize = size_to_str(block->page_size);
1168         g_string_append_printf(buf, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
1169                                " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n",
1170                                block->idstr, psize,
1171                                (uint64_t)block->offset,
1172                                (uint64_t)block->used_length,
1173                                (uint64_t)block->max_length,
1174                                (uint64_t)(uintptr_t)block->host,
1175                                block->mr->readonly ? "ro" : "rw");
1176 
1177         g_free(psize);
1178     }
1179 
1180     return buf;
1181 }
1182 
1183 static int find_min_backend_pagesize(Object *obj, void *opaque)
1184 {
1185     long *hpsize_min = opaque;
1186 
1187     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1188         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1189         long hpsize = host_memory_backend_pagesize(backend);
1190 
1191         if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
1192             *hpsize_min = hpsize;
1193         }
1194     }
1195 
1196     return 0;
1197 }
1198 
1199 static int find_max_backend_pagesize(Object *obj, void *opaque)
1200 {
1201     long *hpsize_max = opaque;
1202 
1203     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1204         HostMemoryBackend *backend = MEMORY_BACKEND(obj);
1205         long hpsize = host_memory_backend_pagesize(backend);
1206 
1207         if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
1208             *hpsize_max = hpsize;
1209         }
1210     }
1211 
1212     return 0;
1213 }
1214 
1215 /*
1216  * TODO: We assume right now that all mapped host memory backends are
1217  * used as RAM; however, some might be used for different purposes.
1218  */
1219 long qemu_minrampagesize(void)
1220 {
1221     long hpsize = LONG_MAX;
1222     Object *memdev_root = object_resolve_path("/objects", NULL);
1223 
1224     object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
1225     return hpsize;
1226 }
1227 
1228 long qemu_maxrampagesize(void)
1229 {
1230     long pagesize = 0;
1231     Object *memdev_root = object_resolve_path("/objects", NULL);
1232 
1233     object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
1234     return pagesize;
1235 }
1236 
1237 #ifdef CONFIG_POSIX
1238 static int64_t get_file_size(int fd)
1239 {
1240     int64_t size;
1241 #if defined(__linux__)
1242     struct stat st;
1243 
1244     if (fstat(fd, &st) < 0) {
1245         return -errno;
1246     }
1247 
1248     /* Special handling for devdax character devices */
1249     if (S_ISCHR(st.st_mode)) {
1250         g_autofree char *subsystem_path = NULL;
1251         g_autofree char *subsystem = NULL;
1252 
1253         subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
1254                                          major(st.st_rdev), minor(st.st_rdev));
1255         subsystem = g_file_read_link(subsystem_path, NULL);
1256 
1257         if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
1258             g_autofree char *size_path = NULL;
1259             g_autofree char *size_str = NULL;
1260 
1261             size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
1262                                     major(st.st_rdev), minor(st.st_rdev));
1263 
1264             if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
1265                 return g_ascii_strtoll(size_str, NULL, 0);
1266             }
1267         }
1268     }
1269 #endif /* defined(__linux__) */
1270 
1271     /* st.st_size may be zero for special files yet lseek(2) works */
1272     size = lseek(fd, 0, SEEK_END);
1273     if (size < 0) {
1274         return -errno;
1275     }
1276     return size;
1277 }
1278 
1279 static int64_t get_file_align(int fd)
1280 {
1281     int64_t align = -1;
1282 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1283     struct stat st;
1284 
1285     if (fstat(fd, &st) < 0) {
1286         return -errno;
1287     }
1288 
1289     /* Special handling for devdax character devices */
1290     if (S_ISCHR(st.st_mode)) {
1291         g_autofree char *path = NULL;
1292         g_autofree char *rpath = NULL;
1293         struct daxctl_ctx *ctx;
1294         struct daxctl_region *region;
1295         int rc = 0;
1296 
1297         path = g_strdup_printf("/sys/dev/char/%d:%d",
1298                     major(st.st_rdev), minor(st.st_rdev));
1299         rpath = realpath(path, NULL);
1300         if (!rpath) {
1301             return -errno;
1302         }
1303 
1304         rc = daxctl_new(&ctx);
1305         if (rc) {
1306             return -1;
1307         }
1308 
1309         daxctl_region_foreach(ctx, region) {
1310             if (strstr(rpath, daxctl_region_get_path(region))) {
1311                 align = daxctl_region_get_align(region);
1312                 break;
1313             }
1314         }
1315         daxctl_unref(ctx);
1316     }
1317 #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
1318 
1319     return align;
1320 }
1321 
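/*
 * Open or create the backing file for a file-backed RAMBlock.  @path may
 * name an existing file, a not-yet-existing file (created unless readonly),
 * or a directory (an unlinked temporary file is created inside it).
 * Returns an open fd, or a negative errno on failure; *created tells the
 * caller whether a new file was created.
 */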
1322 static int file_ram_open(const char *path,
1323                          const char *region_name,
1324                          bool readonly,
1325                          bool *created)
1326 {
1327     char *filename;
1328     char *sanitized_name;
1329     char *c;
1330     int fd = -1;
1331 
1332     *created = false;
1333     for (;;) {
1334         fd = open(path, readonly ? O_RDONLY : O_RDWR);
1335         if (fd >= 0) {
1336             /*
1337              * open(O_RDONLY) won't fail with EISDIR. Check manually if we
1338              * opened a directory and fail similarly to how we fail ENOENT
1339              * in readonly mode. Note that mkstemp() would imply O_RDWR.
1340              */
1341             if (readonly) {
1342                 struct stat file_stat;
1343 
1344                 if (fstat(fd, &file_stat)) {
1345                     close(fd);
1346                     if (errno == EINTR) {
1347                         continue;
1348                     }
1349                     return -errno;
1350                 } else if (S_ISDIR(file_stat.st_mode)) {
1351                     close(fd);
1352                     return -EISDIR;
1353                 }
1354             }
1355             /* @path names an existing file, use it */
1356             break;
1357         }
1358         if (errno == ENOENT) {
1359             if (readonly) {
1360                 /* Refuse to create new, readonly files. */
1361                 return -ENOENT;
1362             }
1363             /* @path names a file that doesn't exist, create it */
1364             fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1365             if (fd >= 0) {
1366                 *created = true;
1367                 break;
1368             }
1369         } else if (errno == EISDIR) {
1370             /* @path names a directory, create a file there */
1371             /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1372             sanitized_name = g_strdup(region_name);
1373             for (c = sanitized_name; *c != '\0'; c++) {
1374                 if (*c == '/') {
1375                     *c = '_';
1376                 }
1377             }
1378 
1379             filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1380                                        sanitized_name);
1381             g_free(sanitized_name);
1382 
1383             fd = mkstemp(filename);
1384             if (fd >= 0) {
1385                 unlink(filename);
1386                 g_free(filename);
1387                 break;
1388             }
1389             g_free(filename);
1390         }
1391         if (errno != EEXIST && errno != EINTR) {
1392             return -errno;
1393         }
1394         /*
1395          * Try again on EINTR and EEXIST.  The latter happens when
1396          * something else creates the file between our two open().
1397          */
1398     }
1399 
1400     return fd;
1401 }
1402 
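/*
 * mmap the backing fd for a RAMBlock.  Alignment, offset and size are
 * validated against the file's page size, the file is grown with
 * ftruncate() when requested, and the block's RAM_* flags are translated
 * into qemu_ram_mmap() flags.  Returns the mapped host address, or NULL
 * with @errp set on failure.
 */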
1403 static void *file_ram_alloc(RAMBlock *block,
1404                             ram_addr_t memory,
1405                             int fd,
1406                             bool truncate,
1407                             off_t offset,
1408                             Error **errp)
1409 {
1410     uint32_t qemu_map_flags;
1411     void *area;
1412 
1413     block->page_size = qemu_fd_getpagesize(fd);
1414     if (block->mr->align % block->page_size) {
1415         error_setg(errp, "alignment 0x%" PRIx64
1416                    " must be a multiple of page size 0x%zx",
1417                    block->mr->align, block->page_size);
1418         return NULL;
1419     } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
1420         error_setg(errp, "alignment 0x%" PRIx64
1421                    " must be a power of two", block->mr->align);
1422         return NULL;
1423     } else if (offset % block->page_size) {
1424         error_setg(errp, "offset 0x%" PRIx64
1425                    " must be a multiple of page size 0x%zx",
1426                    offset, block->page_size);
1427         return NULL;
1428     }
1429     block->mr->align = MAX(block->page_size, block->mr->align);
1430 #if defined(__s390x__)
1431     if (kvm_enabled()) {
1432         block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1433     }
1434 #endif
1435 
1436     if (memory < block->page_size) {
1437         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1438                    "or larger than page size 0x%zx",
1439                    memory, block->page_size);
1440         return NULL;
1441     }
1442 
1443     memory = ROUND_UP(memory, block->page_size);
1444 
1445     /*
1446      * ftruncate is not supported by hugetlbfs in older
1447      * hosts, so don't bother bailing out on errors.
1448      * If anything goes wrong with it under other filesystems,
1449      * mmap will fail.
1450      *
1451      * Do not truncate the non-empty backend file to avoid corrupting
1452      * the existing data in the file. Disabling shrinking is not
1453      * enough. For example, the current vNVDIMM implementation stores
1454      * the guest NVDIMM labels at the end of the backend file. If the
1455      * backend file is later extended, QEMU will not be able to find
1456      * those labels. Therefore, extending the non-empty backend file
1457      * is disabled as well.
1458      */
1459     if (truncate && ftruncate(fd, offset + memory)) {
1460         perror("ftruncate");
1461     }
1462 
1463     qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
1464     qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
1465     qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
1466     qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
1467     area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
1468     if (area == MAP_FAILED) {
1469         error_setg_errno(errp, errno,
1470                          "unable to map backing store for guest RAM");
1471         return NULL;
1472     }
1473 
1474     block->fd = fd;
1475     block->fd_offset = offset;
1476     return area;
1477 }
1478 #endif
1479 
1480 /* Allocate space within the ram_addr_t space that governs the
1481  * dirty bitmaps.
1482  * Called with the ramlist lock held.
1483  */
1484 static ram_addr_t find_ram_offset(ram_addr_t size)
1485 {
1486     RAMBlock *block, *next_block;
1487     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1488 
1489     assert(size != 0); /* it would hand out same offset multiple times */
1490     assert(size != 0); /* it would hand out the same offset multiple times */
1491     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1492         return 0;
1493     }
1494 
1495     RAMBLOCK_FOREACH(block) {
1496         ram_addr_t candidate, next = RAM_ADDR_MAX;
1497 
1498         /* Align blocks to start on a 'long' in the bitmap
1499          * which makes the bitmap sync'ing take the fast path.
1500          */
1501         candidate = block->offset + block->max_length;
1502         candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
1503 
1504         /* Search for the closest following block
1505          * and find the gap.
1506          */
1507         RAMBLOCK_FOREACH(next_block) {
1508             if (next_block->offset >= candidate) {
1509                 next = MIN(next, next_block->offset);
1510             }
1511         }
1512 
1513         /* If it fits, remember our place and the size of the gap,
1514          * but keep going so that we might find a smaller gap to fill,
1515          * thus avoiding fragmentation.
1516          */
1517         if (next - candidate >= size && next - candidate < mingap) {
1518             offset = candidate;
1519             mingap = next - candidate;
1520         }
1521 
1522         trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
1523     }
1524 
1525     if (offset == RAM_ADDR_MAX) {
1526         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1527                 (uint64_t)size);
1528         abort();
1529     }
1530 
1531     trace_find_ram_offset(size, offset);
1532 
1533     return offset;
1534 }
1535 
1536 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1537 {
1538     int ret;
1539 
1540     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1541     if (!machine_dump_guest_core(current_machine)) {
1542         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1543         if (ret) {
1544             perror("qemu_madvise");
1545             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1546                             "but dump-guest-core=off specified\n");
1547         }
1548     }
1549 }
1550 
1551 const char *qemu_ram_get_idstr(RAMBlock *rb)
1552 {
1553     return rb->idstr;
1554 }
1555 
1556 void *qemu_ram_get_host_addr(RAMBlock *rb)
1557 {
1558     return rb->host;
1559 }
1560 
1561 ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
1562 {
1563     return rb->offset;
1564 }
1565 
1566 ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
1567 {
1568     return rb->used_length;
1569 }
1570 
1571 ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
1572 {
1573     return rb->max_length;
1574 }
1575 
1576 bool qemu_ram_is_shared(RAMBlock *rb)
1577 {
1578     return rb->flags & RAM_SHARED;
1579 }
1580 
1581 bool qemu_ram_is_noreserve(RAMBlock *rb)
1582 {
1583     return rb->flags & RAM_NORESERVE;
1584 }
1585 
1586 /* Note: Only set at the start of postcopy */
1587 bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
1588 {
1589     return rb->flags & RAM_UF_ZEROPAGE;
1590 }
1591 
1592 void qemu_ram_set_uf_zeroable(RAMBlock *rb)
1593 {
1594     rb->flags |= RAM_UF_ZEROPAGE;
1595 }
1596 
1597 bool qemu_ram_is_migratable(RAMBlock *rb)
1598 {
1599     return rb->flags & RAM_MIGRATABLE;
1600 }
1601 
1602 void qemu_ram_set_migratable(RAMBlock *rb)
1603 {
1604     rb->flags |= RAM_MIGRATABLE;
1605 }
1606 
1607 void qemu_ram_unset_migratable(RAMBlock *rb)
1608 {
1609     rb->flags &= ~RAM_MIGRATABLE;
1610 }
1611 
1612 bool qemu_ram_is_named_file(RAMBlock *rb)
1613 {
1614     return rb->flags & RAM_NAMED_FILE;
1615 }
1616 
1617 int qemu_ram_get_fd(RAMBlock *rb)
1618 {
1619     return rb->fd;
1620 }
1621 
1622 /* Called with the BQL held.  */
1623 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1624 {
1625     RAMBlock *block;
1626 
1627     assert(new_block);
1628     assert(!new_block->idstr[0]);
1629 
1630     if (dev) {
1631         char *id = qdev_get_dev_path(dev);
1632         if (id) {
1633             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1634             g_free(id);
1635         }
1636     }
1637     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1638 
1639     RCU_READ_LOCK_GUARD();
1640     RAMBLOCK_FOREACH(block) {
1641         if (block != new_block &&
1642             !strcmp(block->idstr, new_block->idstr)) {
1643             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1644                     new_block->idstr);
1645             abort();
1646         }
1647     }
1648 }
1649 
1650 /* Called with the BQL held.  */
1651 void qemu_ram_unset_idstr(RAMBlock *block)
1652 {
1653     /* FIXME: arch_init.c assumes that this is not called throughout
1654      * migration.  Ignore the problem since hot-unplug during migration
1655      * does not work anyway.
1656      */
1657     if (block) {
1658         memset(block->idstr, 0, sizeof(block->idstr));
1659     }
1660 }
1661 
1662 size_t qemu_ram_pagesize(RAMBlock *rb)
1663 {
1664     return rb->page_size;
1665 }
1666 
1667 /* Returns the largest page size in use */
1668 size_t qemu_ram_pagesize_largest(void)
1669 {
1670     RAMBlock *block;
1671     size_t largest = 0;
1672 
1673     RAMBLOCK_FOREACH(block) {
1674         largest = MAX(largest, qemu_ram_pagesize(block));
1675     }
1676 
1677     return largest;
1678 }
1679 
1680 static int memory_try_enable_merging(void *addr, size_t len)
1681 {
1682     if (!machine_mem_merge(current_machine)) {
1683         /* disabled by the user */
1684         return 0;
1685     }
1686 
1687     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1688 }
1689 
1690 /*
1691  * Resizing RAM while migrating can result in the migration being canceled.
1692  * Care has to be taken if the guest might have already detected the memory.
1693  *
1694  * As the memory core doesn't know how the memory is accessed, it is up to
1695  * the resize callback to update device state and/or add assertions to detect
1696  * misuse, if necessary.
1697  */
1698 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1699 {
1700     const ram_addr_t oldsize = block->used_length;
1701     const ram_addr_t unaligned_size = newsize;
1702 
1703     assert(block);
1704 
1705     newsize = TARGET_PAGE_ALIGN(newsize);
1706     newsize = REAL_HOST_PAGE_ALIGN(newsize);
1707 
1708     if (block->used_length == newsize) {
1709         /*
1710          * We don't have to resize the ram block (which only knows aligned
1711          * sizes); however, we have to notify if the unaligned size changed.
1712          */
1713         if (unaligned_size != memory_region_size(block->mr)) {
1714             memory_region_set_size(block->mr, unaligned_size);
1715             if (block->resized) {
1716                 block->resized(block->idstr, unaligned_size, block->host);
1717             }
1718         }
1719         return 0;
1720     }
1721 
1722     if (!(block->flags & RAM_RESIZEABLE)) {
1723         error_setg_errno(errp, EINVAL,
1724                          "Size mismatch: %s: 0x" RAM_ADDR_FMT
1725                          " != 0x" RAM_ADDR_FMT, block->idstr,
1726                          newsize, block->used_length);
1727         return -EINVAL;
1728     }
1729 
1730     if (block->max_length < newsize) {
1731         error_setg_errno(errp, EINVAL,
1732                          "Size too large: %s: 0x" RAM_ADDR_FMT
1733                          " > 0x" RAM_ADDR_FMT, block->idstr,
1734                          newsize, block->max_length);
1735         return -EINVAL;
1736     }
1737 
1738     /* Notify before modifying the ram block and touching the bitmaps. */
1739     if (block->host) {
1740         ram_block_notify_resize(block->host, oldsize, newsize);
1741     }
1742 
1743     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1744     block->used_length = newsize;
1745     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1746                                         DIRTY_CLIENTS_ALL);
1747     memory_region_set_size(block->mr, unaligned_size);
1748     if (block->resized) {
1749         block->resized(block->idstr, unaligned_size, block->host);
1750     }
1751     return 0;
1752 }
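
/*
 * Illustrative sketch (not part of the original source) of a caller resizing
 * a RAM_RESIZEABLE block; "block" and "new_size" are hypothetical, and real
 * callers typically go through memory_region_ram_resize():
 *
 *     Error *err = NULL;
 *
 *     if (qemu_ram_resize(block, new_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 */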
1753 
1754 /*
1755  * Trigger sync on the given ram block for range [start, start + length]
1756  * with the backing store if one is available.
1757  * Otherwise this is a no-op.
1758  * Note: this is supposed to be a synchronous operation.
1759  */
1760 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
1761 {
1762     /* The requested range should fit within the block range */
1763     g_assert((start + length) <= block->used_length);
1764 
1765 #ifdef CONFIG_LIBPMEM
1766     /* The lack of support for pmem should not block the sync */
1767     if (ramblock_is_pmem(block)) {
1768         void *addr = ramblock_ptr(block, start);
1769         pmem_persist(addr, length);
1770         return;
1771     }
1772 #endif
1773     if (block->fd >= 0) {
1774         /**
1775          * Case there is no support for PMEM or the memory has not been
1776          * specified as persistent (or is not one) - use the msync.
1777          * Less optimal but still achieves the same goal
1778          */
1779         void *addr = ramblock_ptr(block, start);
1780         if (qemu_msync(addr, length, block->fd)) {
1781             warn_report("%s: failed to sync memory range: start: "
1782                     RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
1783                     __func__, start, length);
1784         }
1785     }
1786 }
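
/*
 * Illustrative sketch (not part of the original source): flushing guest
 * writes in [start, start + length) of a file- or pmem-backed block back to
 * its backing store; "block", "start" and "length" are hypothetical:
 *
 *     qemu_ram_msync(block, start, length);
 *
 * For blocks without a backing store (anonymous RAM) the call is a no-op.
 */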
1787 
1788 /* Called with ram_list.mutex held */
1789 static void dirty_memory_extend(ram_addr_t new_ram_size)
1790 {
1791     unsigned int old_num_blocks = ram_list.num_dirty_blocks;
1792     unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
1793                                                DIRTY_MEMORY_BLOCK_SIZE);
1794     int i;
1795 
1796     /* Only need to extend if block count increased */
1797     if (new_num_blocks <= old_num_blocks) {
1798         return;
1799     }
1800 
1801     for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1802         DirtyMemoryBlocks *old_blocks;
1803         DirtyMemoryBlocks *new_blocks;
1804         int j;
1805 
1806         old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
1807         new_blocks = g_malloc(sizeof(*new_blocks) +
1808                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
1809 
1810         if (old_num_blocks) {
1811             memcpy(new_blocks->blocks, old_blocks->blocks,
1812                    old_num_blocks * sizeof(old_blocks->blocks[0]));
1813         }
1814 
1815         for (j = old_num_blocks; j < new_num_blocks; j++) {
1816             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1817         }
1818 
1819         qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1820 
1821         if (old_blocks) {
1822             g_free_rcu(old_blocks, rcu);
1823         }
1824     }
1825 
1826     ram_list.num_dirty_blocks = new_num_blocks;
1827 }
1828 
1829 static void ram_block_add(RAMBlock *new_block, Error **errp)
1830 {
1831     const bool noreserve = qemu_ram_is_noreserve(new_block);
1832     const bool shared = qemu_ram_is_shared(new_block);
1833     RAMBlock *block;
1834     RAMBlock *last_block = NULL;
1835     bool free_on_error = false;
1836     ram_addr_t ram_size;
1837     Error *err = NULL;
1838 
1839     qemu_mutex_lock_ramlist();
1840     new_block->offset = find_ram_offset(new_block->max_length);
1841 
1842     if (!new_block->host) {
1843         if (xen_enabled()) {
1844             xen_ram_alloc(new_block->offset, new_block->max_length,
1845                           new_block->mr, &err);
1846             if (err) {
1847                 error_propagate(errp, err);
1848                 qemu_mutex_unlock_ramlist();
1849                 return;
1850             }
1851         } else {
1852             new_block->host = qemu_anon_ram_alloc(new_block->max_length,
1853                                                   &new_block->mr->align,
1854                                                   shared, noreserve);
1855             if (!new_block->host) {
1856                 error_setg_errno(errp, errno,
1857                                  "cannot set up guest memory '%s'",
1858                                  memory_region_name(new_block->mr));
1859                 qemu_mutex_unlock_ramlist();
1860                 return;
1861             }
1862             memory_try_enable_merging(new_block->host, new_block->max_length);
1863             free_on_error = true;
1864         }
1865     }
1866 
1867     if (new_block->flags & RAM_GUEST_MEMFD) {
1868         int ret;
1869 
1870         assert(kvm_enabled());
1871         assert(new_block->guest_memfd < 0);
1872 
1873         ret = ram_block_discard_require(true);
1874         if (ret < 0) {
1875             error_setg_errno(errp, -ret,
1876                              "cannot set up private guest memory: discard currently blocked");
1877             error_append_hint(errp, "Are you using assigned devices?\n");
                 qemu_mutex_unlock_ramlist();
1878             goto out_free;
1879         }
1880 
1881         new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
1882                                                         0, errp);
1883         if (new_block->guest_memfd < 0) {
1884             qemu_mutex_unlock_ramlist();
1885             goto out_free;
1886         }
1887     }
1888 
1889     ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
1890     dirty_memory_extend(ram_size);
1891     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
1892      * QLIST (which has an RCU-friendly variant) does not have insertion at
1893      * tail, so save the last element in last_block.
1894      */
1895     RAMBLOCK_FOREACH(block) {
1896         last_block = block;
1897         if (block->max_length < new_block->max_length) {
1898             break;
1899         }
1900     }
1901     if (block) {
1902         QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1903     } else if (last_block) {
1904         QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1905     } else { /* list is empty */
1906         QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1907     }
1908     ram_list.mru_block = NULL;
1909 
1910     /* Write list before version */
1911     smp_wmb();
1912     ram_list.version++;
1913     qemu_mutex_unlock_ramlist();
1914 
1915     cpu_physical_memory_set_dirty_range(new_block->offset,
1916                                         new_block->used_length,
1917                                         DIRTY_CLIENTS_ALL);
1918 
1919     if (new_block->host) {
1920         qemu_ram_setup_dump(new_block->host, new_block->max_length);
1921         qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1922         /*
1923          * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
1924          * MMU.  Configure it unless the machine is a qtest server, in which
1925          * case KVM is not used and it may be forked (e.g. for fuzzing purposes).
1926          */
1927         if (!qtest_enabled()) {
1928             qemu_madvise(new_block->host, new_block->max_length,
1929                          QEMU_MADV_DONTFORK);
1930         }
1931         ram_block_notify_add(new_block->host, new_block->used_length,
1932                              new_block->max_length);
1933     }
1934     return;
1935 
1936 out_free:
1937     if (free_on_error) {
1938         qemu_anon_ram_free(new_block->host, new_block->max_length);
1939         new_block->host = NULL;
1940     }
1941 }
1942 
1943 #ifdef CONFIG_POSIX
1944 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
1945                                  uint32_t ram_flags, int fd, off_t offset,
1946                                  Error **errp)
1947 {
1948     RAMBlock *new_block;
1949     Error *local_err = NULL;
1950     int64_t file_size, file_align;
1951 
1952     /* Only these ram flags are supported for now. */
1953     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
1954                           RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
1955                           RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0);
1956 
1957     if (xen_enabled()) {
1958         error_setg(errp, "-mem-path not supported with Xen");
1959         return NULL;
1960     }
1961 
1962     if (kvm_enabled() && !kvm_has_sync_mmu()) {
1963         error_setg(errp,
1964                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1965         return NULL;
1966     }
1967 
1968     size = TARGET_PAGE_ALIGN(size);
1969     size = REAL_HOST_PAGE_ALIGN(size);
1970 
1971     file_size = get_file_size(fd);
1972     if (file_size > offset && file_size < (offset + size)) {
1973         error_setg(errp, "backing store size 0x%" PRIx64
1974                    " does not match 'size' option 0x" RAM_ADDR_FMT,
1975                    file_size, size);
1976         return NULL;
1977     }
1978 
1979     file_align = get_file_align(fd);
1980     if (file_align > 0 && file_align > mr->align) {
1981         error_setg(errp, "backing store align 0x%" PRIx64
1982                    " is larger than 'align' option 0x%" PRIx64,
1983                    file_align, mr->align);
1984         return NULL;
1985     }
1986 
1987     new_block = g_malloc0(sizeof(*new_block));
1988     new_block->mr = mr;
1989     new_block->used_length = size;
1990     new_block->max_length = size;
1991     new_block->flags = ram_flags;
1992     new_block->guest_memfd = -1;
1993     new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset,
1994                                      errp);
1995     if (!new_block->host) {
1996         g_free(new_block);
1997         return NULL;
1998     }
1999 
2000     ram_block_add(new_block, &local_err);
2001     if (local_err) {
2002         g_free(new_block);
2003         error_propagate(errp, local_err);
2004         return NULL;
2005     }
2006     return new_block;
2007 }
2008 
2009 
2010 
2011 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2012                                    uint32_t ram_flags, const char *mem_path,
2013                                    off_t offset, Error **errp)
2014 {
2015     int fd;
2016     bool created;
2017     RAMBlock *block;
2018 
2019     fd = file_ram_open(mem_path, memory_region_name(mr),
2020                        !!(ram_flags & RAM_READONLY_FD), &created);
2021     if (fd < 0) {
2022         error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM",
2023                          mem_path);
2024         if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) &&
2025             fd == -EACCES) {
2026             /*
2027              * If we can open the file R/O (note: will never create a new file)
2028              * and we are dealing with a private mapping, there are still ways
2029              * to consume such files and get RAM instead of ROM.
2030              */
2031             fd = file_ram_open(mem_path, memory_region_name(mr), true,
2032                                &created);
2033             if (fd < 0) {
2034                 return NULL;
2035             }
2036             assert(!created);
2037             close(fd);
2038             error_append_hint(errp, "Consider opening the backing store"
2039                 " read-only but still creating writable RAM using"
2040                 " '-object memory-backend-file,readonly=on,rom=off...'"
2041                 " (see \"VM templating\" documentation)\n");
2042         }
2043         return NULL;
2044     }
2045 
2046     block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp);
2047     if (!block) {
2048         if (created) {
2049             unlink(mem_path);
2050         }
2051         close(fd);
2052         return NULL;
2053     }
2054 
2055     return block;
2056 }
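
/*
 * Illustrative sketch (not part of the original source), roughly what the
 * memory-backend-file object ends up doing; "size", "mr" and the hugetlbfs
 * path are hypothetical:
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/dev/hugepages/guest", 0, &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */
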
2057 #endif
2058 
2059 static
2060 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2061                                   void (*resized)(const char*,
2062                                                   uint64_t length,
2063                                                   void *host),
2064                                   void *host, uint32_t ram_flags,
2065                                   MemoryRegion *mr, Error **errp)
2066 {
2067     RAMBlock *new_block;
2068     Error *local_err = NULL;
2069     int align;
2070 
2071     assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
2072                           RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2073     assert(!host ^ (ram_flags & RAM_PREALLOC));
2074 
2075     align = qemu_real_host_page_size();
2076     align = MAX(align, TARGET_PAGE_SIZE);
2077     size = ROUND_UP(size, align);
2078     max_size = ROUND_UP(max_size, align);
2079 
2080     new_block = g_malloc0(sizeof(*new_block));
2081     new_block->mr = mr;
2082     new_block->resized = resized;
2083     new_block->used_length = size;
2084     new_block->max_length = max_size;
2085     assert(max_size >= size);
2086     new_block->fd = -1;
2087     new_block->guest_memfd = -1;
2088     new_block->page_size = qemu_real_host_page_size();
2089     new_block->host = host;
2090     new_block->flags = ram_flags;
2091     ram_block_add(new_block, &local_err);
2092     if (local_err) {
2093         g_free(new_block);
2094         error_propagate(errp, local_err);
2095         return NULL;
2096     }
2097     return new_block;
2098 }
2099 
2100 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2101                                    MemoryRegion *mr, Error **errp)
2102 {
2103     return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr,
2104                                    errp);
2105 }
2106 
2107 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
2108                          MemoryRegion *mr, Error **errp)
2109 {
2110     assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2111     return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
2112 }
2113 
2114 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
2115                                      void (*resized)(const char*,
2116                                                      uint64_t length,
2117                                                      void *host),
2118                                      MemoryRegion *mr, Error **errp)
2119 {
2120     return qemu_ram_alloc_internal(size, maxsz, resized, NULL,
2121                                    RAM_RESIZEABLE, mr, errp);
2122 }
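
/*
 * Illustrative sketch (not part of the original source): a resizeable block
 * whose callback is invoked whenever the (unaligned) used size changes;
 * "fw_blob_resized", "mr", "initial" and "max" are hypothetical:
 *
 *     static void fw_blob_resized(const char *id, uint64_t len, void *host)
 *     {
 *         // e.g. refresh whatever state mirrors the blob length
 *     }
 *
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(initial, max, fw_blob_resized,
 *                                              mr, &error_fatal);
 */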
2123 
2124 static void reclaim_ramblock(RAMBlock *block)
2125 {
2126     if (block->flags & RAM_PREALLOC) {
2127         ;
2128     } else if (xen_enabled()) {
2129         xen_invalidate_map_cache_entry(block->host);
2130 #ifndef _WIN32
2131     } else if (block->fd >= 0) {
2132         qemu_ram_munmap(block->fd, block->host, block->max_length);
2133         close(block->fd);
2134 #endif
2135     } else {
2136         qemu_anon_ram_free(block->host, block->max_length);
2137     }
2138 
2139     if (block->guest_memfd >= 0) {
2140         close(block->guest_memfd);
2141         ram_block_discard_require(false);
2142     }
2143 
2144     g_free(block);
2145 }
2146 
2147 void qemu_ram_free(RAMBlock *block)
2148 {
2149     if (!block) {
2150         return;
2151     }
2152 
2153     if (block->host) {
2154         ram_block_notify_remove(block->host, block->used_length,
2155                                 block->max_length);
2156     }
2157 
2158     qemu_mutex_lock_ramlist();
2159     QLIST_REMOVE_RCU(block, next);
2160     ram_list.mru_block = NULL;
2161     /* Write list before version */
2162     smp_wmb();
2163     ram_list.version++;
2164     call_rcu(block, reclaim_ramblock, rcu);
2165     qemu_mutex_unlock_ramlist();
2166 }
2167 
2168 #ifndef _WIN32
2169 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2170 {
2171     RAMBlock *block;
2172     ram_addr_t offset;
2173     int flags;
2174     void *area, *vaddr;
2175     int prot;
2176 
2177     RAMBLOCK_FOREACH(block) {
2178         offset = addr - block->offset;
2179         if (offset < block->max_length) {
2180             vaddr = ramblock_ptr(block, offset);
2181             if (block->flags & RAM_PREALLOC) {
2182                 ;
2183             } else if (xen_enabled()) {
2184                 abort();
2185             } else {
2186                 flags = MAP_FIXED;
2187                 flags |= block->flags & RAM_SHARED ?
2188                          MAP_SHARED : MAP_PRIVATE;
2189                 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
2190                 prot = PROT_READ;
2191                 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
2192                 if (block->fd >= 0) {
2193                     area = mmap(vaddr, length, prot, flags, block->fd,
2194                                 offset + block->fd_offset);
2195                 } else {
2196                     flags |= MAP_ANONYMOUS;
2197                     area = mmap(vaddr, length, prot, flags, -1, 0);
2198                 }
2199                 if (area != vaddr) {
2200                     error_report("Could not remap addr: "
2201                                  RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
2202                                  length, addr);
2203                     exit(1);
2204                 }
2205                 memory_try_enable_merging(vaddr, length);
2206                 qemu_ram_setup_dump(vaddr, length);
2207             }
2208         }
2209     }
2210 }
2211 #endif /* !_WIN32 */
2212 
2213 /*
2214  * Return a host pointer to guest's ram.
2215  * For Xen, foreign mappings get created if they don't already exist.
2216  *
2217  * @block: block for the RAM to lookup (optional and may be NULL).
2218  * @addr: address within the memory region.
2219  * @size: pointer to requested size (optional and may be NULL).
2220  *        size may get modified and return a value smaller than
2221  *        what was requested.
2222  * @lock: whether to lock the mapping in xen-mapcache until invalidated.
2223  * @is_write: hint whether to map RW or RO in the xen-mapcache.
2224  *            (optional and may always be set to true).
2225  *
2226  * Called within RCU critical section.
2227  */
2228 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
2229                                  hwaddr *size, bool lock,
2230                                  bool is_write)
2231 {
2232     hwaddr len = 0;
2233 
2234     if (size && *size == 0) {
2235         return NULL;
2236     }
2237 
2238     if (block == NULL) {
2239         block = qemu_get_ram_block(addr);
2240         addr -= block->offset;
2241     }
2242     if (size) {
2243         *size = MIN(*size, block->max_length - addr);
2244         len = *size;
2245     }
2246 
2247     if (xen_enabled() && block->host == NULL) {
2248         /* We need to check if the requested address is in RAM,
2249          * because we don't want to map the entire memory in QEMU.
2250          * If it is, just map the requested area.
2251          */
2252         if (xen_mr_is_memory(block->mr)) {
2253             return xen_map_cache(block->mr, block->offset + addr,
2254                                  len, block->offset,
2255                                  lock, lock, is_write);
2256         }
2257 
2258         block->host = xen_map_cache(block->mr, block->offset,
2259                                     block->max_length,
2260                                     block->offset,
2261                                     1, lock, is_write);
2262     }
2263 
2264     return ramblock_ptr(block, addr);
2265 }
2266 
2267 /*
2268  * Return a host pointer to ram allocated with qemu_ram_alloc.
2269  * This should not be used for general purpose DMA.  Use address_space_map
2270  * or address_space_rw instead. For local memory (e.g. video ram) that the
2271  * device owns, use memory_region_get_ram_ptr.
2272  *
2273  * Called within RCU critical section.
2274  */
2275 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2276 {
2277     return qemu_ram_ptr_length(ram_block, addr, NULL, false, true);
2278 }
2279 
2280 /* Return the offset of a host pointer within a RAMBlock */
2281 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2282 {
2283     ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2284     assert((uintptr_t)host >= (uintptr_t)rb->host);
2285     assert(res < rb->max_length);
2286 
2287     return res;
2288 }
2289 
2290 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2291                                    ram_addr_t *offset)
2292 {
2293     RAMBlock *block;
2294     uint8_t *host = ptr;
2295 
2296     if (xen_enabled()) {
2297         ram_addr_t ram_addr;
2298         RCU_READ_LOCK_GUARD();
2299         ram_addr = xen_ram_addr_from_mapcache(ptr);
2300         if (ram_addr == RAM_ADDR_INVALID) {
2301             return NULL;
2302         }
2303 
2304         block = qemu_get_ram_block(ram_addr);
2305         if (block) {
2306             *offset = ram_addr - block->offset;
2307         }
2308         return block;
2309     }
2310 
2311     RCU_READ_LOCK_GUARD();
2312     block = qatomic_rcu_read(&ram_list.mru_block);
2313     if (block && block->host && host - block->host < block->max_length) {
2314         goto found;
2315     }
2316 
2317     RAMBLOCK_FOREACH(block) {
2318         /* This case happens when the block is not mapped. */
2319         if (block->host == NULL) {
2320             continue;
2321         }
2322         if (host - block->host < block->max_length) {
2323             goto found;
2324         }
2325     }
2326 
2327     return NULL;
2328 
2329 found:
2330     *offset = (host - block->host);
2331     if (round_offset) {
2332         *offset &= TARGET_PAGE_MASK;
2333     }
2334     return block;
2335 }
2336 
2337 /*
2338  * Finds the named RAMBlock
2339  *
2340  * name: The name of the RAMBlock to find
2341  *
2342  * Returns: RAMBlock (or NULL if not found)
2343  */
2344 RAMBlock *qemu_ram_block_by_name(const char *name)
2345 {
2346     RAMBlock *block;
2347 
2348     RAMBLOCK_FOREACH(block) {
2349         if (!strcmp(name, block->idstr)) {
2350             return block;
2351         }
2352     }
2353 
2354     return NULL;
2355 }
2356 
2357 /*
2358  * Some of the system routines need to translate from a host pointer
2359  * (typically a TLB entry) back to a ram offset.
2360  */
2361 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2362 {
2363     RAMBlock *block;
2364     ram_addr_t offset;
2365 
2366     block = qemu_ram_block_from_host(ptr, false, &offset);
2367     if (!block) {
2368         return RAM_ADDR_INVALID;
2369     }
2370 
2371     return block->offset + offset;
2372 }
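
/*
 * Illustrative sketch (not part of the original source): translating a host
 * pointer back into the ram_addr_t space; "host_ptr" is hypothetical:
 *
 *     ram_addr_t ra = qemu_ram_addr_from_host(host_ptr);
 *     if (ra == RAM_ADDR_INVALID) {
 *         // the pointer is not backed by guest RAM
 *     }
 */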
2373 
2374 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2375 {
2376     ram_addr_t ram_addr;
2377 
2378     ram_addr = qemu_ram_addr_from_host(ptr);
2379     if (ram_addr == RAM_ADDR_INVALID) {
2380         error_report("Bad ram pointer %p", ptr);
2381         abort();
2382     }
2383     return ram_addr;
2384 }
2385 
2386 static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2387                                  MemTxAttrs attrs, void *buf, hwaddr len);
2388 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2389                                   const void *buf, hwaddr len);
2390 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
2391                                   bool is_write, MemTxAttrs attrs);
2392 
2393 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2394                                 unsigned len, MemTxAttrs attrs)
2395 {
2396     subpage_t *subpage = opaque;
2397     uint8_t buf[8];
2398     MemTxResult res;
2399 
2400 #if defined(DEBUG_SUBPAGE)
2401     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__,
2402            subpage, len, addr);
2403 #endif
2404     res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2405     if (res) {
2406         return res;
2407     }
2408     *data = ldn_p(buf, len);
2409     return MEMTX_OK;
2410 }
2411 
2412 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2413                                  uint64_t value, unsigned len, MemTxAttrs attrs)
2414 {
2415     subpage_t *subpage = opaque;
2416     uint8_t buf[8];
2417 
2418 #if defined(DEBUG_SUBPAGE)
2419     printf("%s: subpage %p len %u addr " HWADDR_FMT_plx
2420            " value %"PRIx64"\n",
2421            __func__, subpage, len, addr, value);
2422 #endif
2423     stn_p(buf, len, value);
2424     return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2425 }
2426 
2427 static bool subpage_accepts(void *opaque, hwaddr addr,
2428                             unsigned len, bool is_write,
2429                             MemTxAttrs attrs)
2430 {
2431     subpage_t *subpage = opaque;
2432 #if defined(DEBUG_SUBPAGE)
2433     printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n",
2434            __func__, subpage, is_write ? 'w' : 'r', len, addr);
2435 #endif
2436 
2437     return flatview_access_valid(subpage->fv, addr + subpage->base,
2438                                  len, is_write, attrs);
2439 }
2440 
2441 static const MemoryRegionOps subpage_ops = {
2442     .read_with_attrs = subpage_read,
2443     .write_with_attrs = subpage_write,
2444     .impl.min_access_size = 1,
2445     .impl.max_access_size = 8,
2446     .valid.min_access_size = 1,
2447     .valid.max_access_size = 8,
2448     .valid.accepts = subpage_accepts,
2449     .endianness = DEVICE_NATIVE_ENDIAN,
2450 };
2451 
2452 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2453                             uint16_t section)
2454 {
2455     int idx, eidx;
2456 
2457     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2458         return -1;
2459     idx = SUBPAGE_IDX(start);
2460     eidx = SUBPAGE_IDX(end);
2461 #if defined(DEBUG_SUBPAGE)
2462     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2463            __func__, mmio, start, end, idx, eidx, section);
2464 #endif
2465     for (; idx <= eidx; idx++) {
2466         mmio->sub_section[idx] = section;
2467     }
2468 
2469     return 0;
2470 }
2471 
2472 static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2473 {
2474     subpage_t *mmio;
2475 
2476     /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
2477     mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2478     mmio->fv = fv;
2479     mmio->base = base;
2480     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2481                           NULL, TARGET_PAGE_SIZE);
2482     mmio->iomem.subpage = true;
2483 #if defined(DEBUG_SUBPAGE)
2484     printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__,
2485            mmio, base, TARGET_PAGE_SIZE);
2486 #endif
2487 
2488     return mmio;
2489 }
2490 
2491 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2492 {
2493     assert(fv);
2494     MemoryRegionSection section = {
2495         .fv = fv,
2496         .mr = mr,
2497         .offset_within_address_space = 0,
2498         .offset_within_region = 0,
2499         .size = int128_2_64(),
2500     };
2501 
2502     return phys_section_add(map, &section);
2503 }
2504 
2505 MemoryRegionSection *iotlb_to_section(CPUState *cpu,
2506                                       hwaddr index, MemTxAttrs attrs)
2507 {
2508     int asidx = cpu_asidx_from_attrs(cpu, attrs);
2509     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2510     AddressSpaceDispatch *d = cpuas->memory_dispatch;
2511     int section_index = index & ~TARGET_PAGE_MASK;
2512     MemoryRegionSection *ret;
2513 
2514     assert(section_index < d->map.sections_nb);
2515     ret = d->map.sections + section_index;
2516     assert(ret->mr);
2517     assert(ret->mr->ops);
2518 
2519     return ret;
2520 }
2521 
2522 static void io_mem_init(void)
2523 {
2524     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2525                           NULL, UINT64_MAX);
2526 }
2527 
2528 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2529 {
2530     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2531     uint16_t n;
2532 
2533     n = dummy_section(&d->map, fv, &io_mem_unassigned);
2534     assert(n == PHYS_SECTION_UNASSIGNED);
2535 
2536     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2537 
2538     return d;
2539 }
2540 
2541 void address_space_dispatch_free(AddressSpaceDispatch *d)
2542 {
2543     phys_sections_free(&d->map);
2544     g_free(d);
2545 }
2546 
2547 static void do_nothing(CPUState *cpu, run_on_cpu_data d)
2548 {
2549 }
2550 
2551 static void tcg_log_global_after_sync(MemoryListener *listener)
2552 {
2553     CPUAddressSpace *cpuas;
2554 
2555     /* Wait for the CPU to end the current TB.  This avoids the following
2556      * incorrect race:
2557      *
2558      *      vCPU                         migration
2559      *      ----------------------       -------------------------
2560      *      TLB check -> slow path
2561      *        notdirty_mem_write
2562      *          write to RAM
2563      *          mark dirty
2564      *                                   clear dirty flag
2565      *      TLB check -> fast path
2566      *                                   read memory
2567      *        write to RAM
2568      *
2569      * by pushing the migration thread's memory read after the vCPU thread has
2570      * written the memory.
2571      */
2572     if (replay_mode == REPLAY_MODE_NONE) {
2573         /*
2574          * VGA can make calls to this function while updating the screen.
2575          * In record/replay mode this causes a deadlock, because
2576          * run_on_cpu waits for the rr mutex. Therefore no races are possible
2577          * in this case, and there is no need to call run_on_cpu when
2578          * record/replay is enabled.
2579          */
2580         cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2581         run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
2582     }
2583 }
2584 
2585 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
2586 {
2587     CPUAddressSpace *cpuas = data.host_ptr;
2588 
2589     cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
2590     tlb_flush(cpu);
2591 }
2592 
2593 static void tcg_commit(MemoryListener *listener)
2594 {
2595     CPUAddressSpace *cpuas;
2596     CPUState *cpu;
2597 
2598     assert(tcg_enabled());
2599     /* since each CPU stores ram addresses in its TLB cache, we must
2600        reset the modified entries */
2601     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2602     cpu = cpuas->cpu;
2603 
2604     /*
2605      * Defer changes to as->memory_dispatch until the cpu is quiescent.
2606      * Otherwise we race between (1) other cpu threads and (2) ongoing
2607      * i/o for the current cpu thread, with data cached by mmu_lookup().
2608      *
2609      * In addition, queueing the work function will kick the cpu back to
2610      * the main loop, which will end the RCU critical section and reclaim
2611      * the memory data structures.
2612      *
2613      * That said, the listener is also called during realize, before all
2614      * of the tcg machinery for run_on_cpu is initialized: thus halt_cond.
2615      */
2616     if (cpu->halt_cond) {
2617         async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
2618     } else {
2619         tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
2620     }
2621 }
2622 
2623 static void memory_map_init(void)
2624 {
2625     system_memory = g_malloc(sizeof(*system_memory));
2626 
2627     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2628     address_space_init(&address_space_memory, system_memory, "memory");
2629 
2630     system_io = g_malloc(sizeof(*system_io));
2631     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2632                           65536);
2633     address_space_init(&address_space_io, system_io, "I/O");
2634 }
2635 
2636 MemoryRegion *get_system_memory(void)
2637 {
2638     return system_memory;
2639 }
2640 
2641 MemoryRegion *get_system_io(void)
2642 {
2643     return system_io;
2644 }
2645 
2646 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2647                                      hwaddr length)
2648 {
2649     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2650     ram_addr_t ramaddr = memory_region_get_ram_addr(mr);
2651 
2652     /* We know we're only called for RAM MemoryRegions */
2653     assert(ramaddr != RAM_ADDR_INVALID);
2654     addr += ramaddr;
2655 
2656     /* No early return if dirty_log_mask is or becomes 0, because
2657      * cpu_physical_memory_set_dirty_range will still call
2658      * xen_modified_memory.
2659      */
2660     if (dirty_log_mask) {
2661         dirty_log_mask =
2662             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2663     }
2664     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2665         assert(tcg_enabled());
2666         tb_invalidate_phys_range(addr, addr + length - 1);
2667         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2668     }
2669     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2670 }
2671 
2672 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
2673 {
2674     /*
2675      * In principle this function would work on other memory region types too,
2676      * but the ROM device use case is the only one where this operation is
2677      * necessary.  Other memory regions should use the
2678      * address_space_read/write() APIs.
2679      */
2680     assert(memory_region_is_romd(mr));
2681 
2682     invalidate_and_set_dirty(mr, addr, size);
2683 }
2684 
2685 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2686 {
2687     unsigned access_size_max = mr->ops->valid.max_access_size;
2688 
2689     /* Regions are assumed to support 1-4 byte accesses unless
2690        otherwise specified.  */
2691     if (access_size_max == 0) {
2692         access_size_max = 4;
2693     }
2694 
2695     /* Bound the maximum access by the alignment of the address.  */
2696     if (!mr->ops->impl.unaligned) {
2697         unsigned align_size_max = addr & -addr;
2698         if (align_size_max != 0 && align_size_max < access_size_max) {
2699             access_size_max = align_size_max;
2700         }
2701     }
2702 
2703     /* Don't attempt accesses larger than the maximum.  */
2704     if (l > access_size_max) {
2705         l = access_size_max;
2706     }
2707     l = pow2floor(l);
2708 
2709     return l;
2710 }
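
/*
 * Illustrative example (not part of the original source): for a region whose
 * ops declare valid.max_access_size == 4 and no unaligned support, an 8-byte
 * access at address 0x1002 is limited to 2 bytes, because 0x1002 & -0x1002
 * == 2 is smaller than the declared maximum; the caller then issues the
 * remainder as further accesses.
 */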
2711 
2712 bool prepare_mmio_access(MemoryRegion *mr)
2713 {
2714     bool release_lock = false;
2715 
2716     if (!bql_locked()) {
2717         bql_lock();
2718         release_lock = true;
2719     }
2720     if (mr->flush_coalesced_mmio) {
2721         qemu_flush_coalesced_mmio_buffer();
2722     }
2723 
2724     return release_lock;
2725 }
2726 
2727 /**
2728  * flatview_access_allowed
2729  * @mr: #MemoryRegion to be accessed
2730  * @attrs: memory transaction attributes
2731  * @addr: address within that memory region
2732  * @len: the number of bytes to access
2733  *
2734  * Check if a memory transaction is allowed.
2735  *
2736  * Returns: true if transaction is allowed, false if denied.
2737  */
2738 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
2739                                     hwaddr addr, hwaddr len)
2740 {
2741     if (likely(!attrs.memory)) {
2742         return true;
2743     }
2744     if (memory_region_is_ram(mr)) {
2745         return true;
2746     }
2747     qemu_log_mask(LOG_INVALID_MEM,
2748                   "Invalid access to non-RAM device at "
2749                   "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
2750                   "region '%s'\n", addr, len, memory_region_name(mr));
2751     return false;
2752 }
2753 
2754 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
2755                                                 const uint8_t *buf,
2756                                                 hwaddr len, hwaddr mr_addr,
2757                                                 hwaddr *l, MemoryRegion *mr)
2758 {
2759     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2760         return MEMTX_ACCESS_ERROR;
2761     }
2762 
2763     if (!memory_access_is_direct(mr, true)) {
2764         uint64_t val;
2765         MemTxResult result;
2766         bool release_lock = prepare_mmio_access(mr);
2767 
2768         *l = memory_access_size(mr, *l, mr_addr);
2769         /*
2770          * XXX: could force current_cpu to NULL to avoid
2771          * potential bugs
2772          */
2773 
2774         /*
2775          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2776          * the buffer in the following ldn_he_p().
2777          */
2778 #ifdef QEMU_STATIC_ANALYSIS
2779         assert((*l == 1 && len >= 1) ||
2780                (*l == 2 && len >= 2) ||
2781                (*l == 4 && len >= 4) ||
2782                (*l == 8 && len >= 8));
2783 #endif
2784         val = ldn_he_p(buf, *l);
2785         result = memory_region_dispatch_write(mr, mr_addr, val,
2786                                               size_memop(*l), attrs);
2787         if (release_lock) {
2788             bql_unlock();
2789         }
2790 
2791         return result;
2792     } else {
2793         /* RAM case */
2794         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
2795                                                false, true);
2796 
2797         memmove(ram_ptr, buf, *l);
2798         invalidate_and_set_dirty(mr, mr_addr, *l);
2799 
2800         return MEMTX_OK;
2801     }
2802 }
2803 
2804 /* Called within RCU critical section.  */
2805 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2806                                            MemTxAttrs attrs,
2807                                            const void *ptr,
2808                                            hwaddr len, hwaddr mr_addr,
2809                                            hwaddr l, MemoryRegion *mr)
2810 {
2811     MemTxResult result = MEMTX_OK;
2812     const uint8_t *buf = ptr;
2813 
2814     for (;;) {
2815         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
2816                                                mr);
2817 
2818         len -= l;
2819         buf += l;
2820         addr += l;
2821 
2822         if (!len) {
2823             break;
2824         }
2825 
2826         l = len;
2827         mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
2828     }
2829 
2830     return result;
2831 }
2832 
2833 /* Called from RCU critical section.  */
2834 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2835                                   const void *buf, hwaddr len)
2836 {
2837     hwaddr l;
2838     hwaddr mr_addr;
2839     MemoryRegion *mr;
2840 
2841     l = len;
2842     mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs);
2843     if (!flatview_access_allowed(mr, attrs, addr, len)) {
2844         return MEMTX_ACCESS_ERROR;
2845     }
2846     return flatview_write_continue(fv, addr, attrs, buf, len,
2847                                    mr_addr, l, mr);
2848 }
2849 
2850 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
2851                                                hwaddr len, hwaddr mr_addr,
2852                                                hwaddr *l,
2853                                                MemoryRegion *mr)
2854 {
2855     if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) {
2856         return MEMTX_ACCESS_ERROR;
2857     }
2858 
2859     if (!memory_access_is_direct(mr, false)) {
2860         /* I/O case */
2861         uint64_t val;
2862         MemTxResult result;
2863         bool release_lock = prepare_mmio_access(mr);
2864 
2865         *l = memory_access_size(mr, *l, mr_addr);
2866         result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l),
2867                                              attrs);
2868 
2869         /*
2870          * Assure Coverity (and ourselves) that we are not going to OVERRUN
2871          * the buffer in the following stn_he_p().
2872          */
2873 #ifdef QEMU_STATIC_ANALYSIS
2874         assert((*l == 1 && len >= 1) ||
2875                (*l == 2 && len >= 2) ||
2876                (*l == 4 && len >= 4) ||
2877                (*l == 8 && len >= 8));
2878 #endif
2879         stn_he_p(buf, *l, val);
2880 
2881         if (release_lock) {
2882             bql_unlock();
2883         }
2884         return result;
2885     } else {
2886         /* RAM case */
2887         uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l,
2888                                                false, false);
2889 
2890         memcpy(buf, ram_ptr, *l);
2891 
2892         return MEMTX_OK;
2893     }
2894 }
2895 
2896 /* Called within RCU critical section.  */
2897 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2898                                    MemTxAttrs attrs, void *ptr,
2899                                    hwaddr len, hwaddr mr_addr, hwaddr l,
2900                                    MemoryRegion *mr)
2901 {
2902     MemTxResult result = MEMTX_OK;
2903     uint8_t *buf = ptr;
2904 
2905     fuzz_dma_read_cb(addr, len, mr);
2906     for (;;) {
2907         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
2908 
2909         len -= l;
2910         buf += l;
2911         addr += l;
2912 
2913         if (!len) {
2914             break;
2915         }
2916 
2917         l = len;
2918         mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
2919     }
2920 
2921     return result;
2922 }
2923 
2924 /* Called from RCU critical section.  */
2925 static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
2926                                  MemTxAttrs attrs, void *buf, hwaddr len)
2927 {
2928     hwaddr l;
2929     hwaddr mr_addr;
2930     MemoryRegion *mr;
2931 
2932     l = len;
2933     mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs);
2934     if (!flatview_access_allowed(mr, attrs, addr, len)) {
2935         return MEMTX_ACCESS_ERROR;
2936     }
2937     return flatview_read_continue(fv, addr, attrs, buf, len,
2938                                   mr_addr, l, mr);
2939 }
2940 
2941 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2942                                     MemTxAttrs attrs, void *buf, hwaddr len)
2943 {
2944     MemTxResult result = MEMTX_OK;
2945     FlatView *fv;
2946 
2947     if (len > 0) {
2948         RCU_READ_LOCK_GUARD();
2949         fv = address_space_to_flatview(as);
2950         result = flatview_read(fv, addr, attrs, buf, len);
2951     }
2952 
2953     return result;
2954 }
2955 
2956 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2957                                 MemTxAttrs attrs,
2958                                 const void *buf, hwaddr len)
2959 {
2960     MemTxResult result = MEMTX_OK;
2961     FlatView *fv;
2962 
2963     if (len > 0) {
2964         RCU_READ_LOCK_GUARD();
2965         fv = address_space_to_flatview(as);
2966         result = flatview_write(fv, addr, attrs, buf, len);
2967     }
2968 
2969     return result;
2970 }
2971 
2972 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2973                              void *buf, hwaddr len, bool is_write)
2974 {
2975     if (is_write) {
2976         return address_space_write(as, addr, attrs, buf, len);
2977     } else {
2978         return address_space_read_full(as, addr, attrs, buf, len);
2979     }
2980 }
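
/*
 * Illustrative sketch (not part of the original source): reading guest
 * physical memory into a local buffer; "gpa", "buf" and "len" are
 * hypothetical:
 *
 *     MemTxResult r = address_space_read_full(&address_space_memory, gpa,
 *                                             MEMTXATTRS_UNSPECIFIED,
 *                                             buf, len);
 *     if (r != MEMTX_OK) {
 *         // the transaction failed (decode error, access denied, ...)
 *     }
 */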
2981 
2982 MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
2983                               uint8_t c, hwaddr len, MemTxAttrs attrs)
2984 {
2985 #define FILLBUF_SIZE 512
2986     uint8_t fillbuf[FILLBUF_SIZE];
2987     int l;
2988     MemTxResult error = MEMTX_OK;
2989 
2990     memset(fillbuf, c, FILLBUF_SIZE);
2991     while (len > 0) {
2992         l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
2993         error |= address_space_write(as, addr, attrs, fillbuf, l);
2994         len -= l;
2995         addr += l;
2996     }
2997 
2998     return error;
2999 }
3000 
3001 void cpu_physical_memory_rw(hwaddr addr, void *buf,
3002                             hwaddr len, bool is_write)
3003 {
3004     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3005                      buf, len, is_write);
3006 }
3007 
3008 enum write_rom_type {
3009     WRITE_DATA,
3010     FLUSH_CACHE,
3011 };
3012 
3013 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
3014                                                            hwaddr addr,
3015                                                            MemTxAttrs attrs,
3016                                                            const void *ptr,
3017                                                            hwaddr len,
3018                                                            enum write_rom_type type)
3019 {
3020     hwaddr l;
3021     uint8_t *ram_ptr;
3022     hwaddr addr1;
3023     MemoryRegion *mr;
3024     const uint8_t *buf = ptr;
3025 
3026     RCU_READ_LOCK_GUARD();
3027     while (len > 0) {
3028         l = len;
3029         mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
3030 
3031         if (!(memory_region_is_ram(mr) ||
3032               memory_region_is_romd(mr))) {
3033             l = memory_access_size(mr, l, addr1);
3034         } else {
3035             /* ROM/RAM case */
3036             ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3037             switch (type) {
3038             case WRITE_DATA:
3039                 memcpy(ram_ptr, buf, l);
3040                 invalidate_and_set_dirty(mr, addr1, l);
3041                 break;
3042             case FLUSH_CACHE:
3043                 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
3044                 break;
3045             }
3046         }
3047         len -= l;
3048         buf += l;
3049         addr += l;
3050     }
3051     return MEMTX_OK;
3052 }
3053 
3054 /* Used for ROM loading: can write to RAM and ROM */
3055 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
3056                                     MemTxAttrs attrs,
3057                                     const void *buf, hwaddr len)
3058 {
3059     return address_space_write_rom_internal(as, addr, attrs,
3060                                             buf, len, WRITE_DATA);
3061 }
3062 
3063 void cpu_flush_icache_range(hwaddr start, hwaddr len)
3064 {
3065     /*
3066      * This function should do the same thing as an icache flush that was
3067      * triggered from within the guest. For TCG we are always cache coherent,
3068      * so there is no need to flush anything. For KVM / Xen we need to flush
3069      * the host's instruction cache at least.
3070      */
3071     if (tcg_enabled()) {
3072         return;
3073     }
3074 
3075     address_space_write_rom_internal(&address_space_memory,
3076                                      start, MEMTXATTRS_UNSPECIFIED,
3077                                      NULL, len, FLUSH_CACHE);
3078 }
3079 
3080 /*
3081  * A magic value stored in the first 8 bytes of the bounce buffer struct. Used
3082  * to detect illegal pointers passed to address_space_unmap.
3083  */
3084 #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed
3085 
3086 typedef struct {
3087     uint64_t magic;
3088     MemoryRegion *mr;
3089     hwaddr addr;
3090     size_t len;
3091     uint8_t buffer[];
3092 } BounceBuffer;
3093 
3094 static void
3095 address_space_unregister_map_client_do(AddressSpaceMapClient *client)
3096 {
3097     QLIST_REMOVE(client, link);
3098     g_free(client);
3099 }
3100 
3101 static void address_space_notify_map_clients_locked(AddressSpace *as)
3102 {
3103     AddressSpaceMapClient *client;
3104 
3105     while (!QLIST_EMPTY(&as->map_client_list)) {
3106         client = QLIST_FIRST(&as->map_client_list);
3107         qemu_bh_schedule(client->bh);
3108         address_space_unregister_map_client_do(client);
3109     }
3110 }
3111 
3112 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
3113 {
3114     AddressSpaceMapClient *client = g_malloc(sizeof(*client));
3115 
3116     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3117     client->bh = bh;
3118     QLIST_INSERT_HEAD(&as->map_client_list, client, link);
3119     /* Write map_client_list before reading bounce_buffer_size. */
3120     smp_mb();
3121     if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) {
3122         address_space_notify_map_clients_locked(as);
3123     }
3124 }
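
/*
 * Illustrative sketch (not part of the original source): a DMA user whose
 * address_space_map() call returned NULL can register a bottom half to retry
 * once bounce-buffer space is released; "retry_bh" is hypothetical:
 *
 *     address_space_register_map_client(as, retry_bh);
 *
 * A notified client is removed from the list automatically; call
 * address_space_unregister_map_client() only if the request is cancelled
 * before the notification arrives.
 */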
3125 
3126 void cpu_exec_init_all(void)
3127 {
3128     qemu_mutex_init(&ram_list.mutex);
3129     /* The data structures we set up here depend on knowing the page size,
3130      * so no more changes can be made after this point.
3131      * In an ideal world, nothing we did before we had finished the
3132      * machine setup would care about the target page size, and we could
3133      * do this much later, rather than requiring board models to state
3134      * up front what their requirements are.
3135      */
3136     finalize_target_page_bits();
3137     io_mem_init();
3138     memory_map_init();
3139 }
3140 
3141 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
3142 {
3143     AddressSpaceMapClient *client;
3144 
3145     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3146     QLIST_FOREACH(client, &as->map_client_list, link) {
3147         if (client->bh == bh) {
3148             address_space_unregister_map_client_do(client);
3149             break;
3150         }
3151     }
3152 }
3153 
3154 static void address_space_notify_map_clients(AddressSpace *as)
3155 {
3156     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3157     address_space_notify_map_clients_locked(as);
3158 }
3159 
3160 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
3161                                   bool is_write, MemTxAttrs attrs)
3162 {
3163     MemoryRegion *mr;
3164     hwaddr l, xlat;
3165 
3166     while (len > 0) {
3167         l = len;
3168         mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3169         if (!memory_access_is_direct(mr, is_write)) {
3170             l = memory_access_size(mr, l, addr);
3171             if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
3172                 return false;
3173             }
3174         }
3175 
3176         len -= l;
3177         addr += l;
3178     }
3179     return true;
3180 }
3181 
3182 bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3183                                 hwaddr len, bool is_write,
3184                                 MemTxAttrs attrs)
3185 {
3186     FlatView *fv;
3187 
3188     RCU_READ_LOCK_GUARD();
3189     fv = address_space_to_flatview(as);
3190     return flatview_access_valid(fv, addr, len, is_write, attrs);
3191 }
3192 
3193 static hwaddr
3194 flatview_extend_translation(FlatView *fv, hwaddr addr,
3195                             hwaddr target_len,
3196                             MemoryRegion *mr, hwaddr base, hwaddr len,
3197                             bool is_write, MemTxAttrs attrs)
3198 {
3199     hwaddr done = 0;
3200     hwaddr xlat;
3201     MemoryRegion *this_mr;
3202 
3203     for (;;) {
3204         target_len -= len;
3205         addr += len;
3206         done += len;
3207         if (target_len == 0) {
3208             return done;
3209         }
3210 
3211         len = target_len;
3212         this_mr = flatview_translate(fv, addr, &xlat,
3213                                      &len, is_write, attrs);
3214         if (this_mr != mr || xlat != base + done) {
3215             return done;
3216         }
3217     }
3218 }
3219 
3220 /* Map a physical memory region into a host virtual address.
3221  * May map a subset of the requested range, given by and returned in *plen.
3222  * May return NULL if resources needed to perform the mapping are exhausted.
3223  * Use only for reads OR writes - not for read-modify-write operations.
3224  * Use address_space_register_map_client() to know when retrying the map
3225  * operation is likely to succeed.
3226  */
3227 void *address_space_map(AddressSpace *as,
3228                         hwaddr addr,
3229                         hwaddr *plen,
3230                         bool is_write,
3231                         MemTxAttrs attrs)
3232 {
3233     hwaddr len = *plen;
3234     hwaddr l, xlat;
3235     MemoryRegion *mr;
3236     FlatView *fv;
3237 
3238     trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs);
3239 
3240     if (len == 0) {
3241         return NULL;
3242     }
3243 
3244     l = len;
3245     RCU_READ_LOCK_GUARD();
3246     fv = address_space_to_flatview(as);
3247     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
3248 
3249     if (!memory_access_is_direct(mr, is_write)) {
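        /*
         * Not direct RAM, so the data must go through a bounce buffer.
         * Reserve space with a CAS loop: atomically grow bounce_buffer_size
         * by up to l bytes without exceeding max_bounce_buffer_size; l ends
         * up clamped to the amount actually reserved (possibly 0).
         */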
3250         size_t used = qatomic_read(&as->bounce_buffer_size);
3251         for (;;) {
3252             hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l);
3253             size_t new_size = used + alloc;
3254             size_t actual =
3255                 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size);
3256             if (actual == used) {
3257                 l = alloc;
3258                 break;
3259             }
3260             used = actual;
3261         }
3262 
3263         if (l == 0) {
3264             *plen = 0;
3265             return NULL;
3266         }
3267 
3268         BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer));
3269         bounce->magic = BOUNCE_BUFFER_MAGIC;
3270         memory_region_ref(mr);
3271         bounce->mr = mr;
3272         bounce->addr = addr;
3273         bounce->len = l;
3274 
3275         if (!is_write) {
3276             flatview_read(fv, addr, attrs,
3277                           bounce->buffer, l);
3278         }
3279 
3280         *plen = l;
3281         return bounce->buffer;
3282     }
3283 
3284     memory_region_ref(mr);
3285     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3286                                         l, is_write, attrs);
3287     fuzz_dma_read_cb(addr, *plen, mr);
3288     return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write);
3289 }
3290 
3291 /* Unmaps a memory region previously mapped by address_space_map().
3292  * Will also mark the memory as dirty if is_write is true.  access_len gives
3293  * the amount of memory that was actually read or written by the caller.
3294  */
3295 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3296                          bool is_write, hwaddr access_len)
3297 {
3298     MemoryRegion *mr;
3299     ram_addr_t addr1;
3300 
3301     mr = memory_region_from_host(buffer, &addr1);
3302     if (mr != NULL) {
3303         if (is_write) {
3304             invalidate_and_set_dirty(mr, addr1, access_len);
3305         }
3306         if (xen_enabled()) {
3307             xen_invalidate_map_cache_entry(buffer);
3308         }
3309         memory_region_unref(mr);
3310         return;
3311     }
3312 
3314     BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer);
3315     assert(bounce->magic == BOUNCE_BUFFER_MAGIC);
3316 
3317     if (is_write) {
3318         address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED,
3319                             bounce->buffer, access_len);
3320     }
3321 
3322     qatomic_sub(&as->bounce_buffer_size, bounce->len);
3323     bounce->magic = ~BOUNCE_BUFFER_MAGIC;
3324     memory_region_unref(bounce->mr);
3325     g_free(bounce);
3326     /* Write bounce_buffer_size before reading map_client_list. */
3327     smp_mb();
3328     address_space_notify_map_clients(as);
3329 }
3330 
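/*
 * Illustrative sketch (editor's addition, not part of physmem.c): the usual
 * map/copy/unmap pattern for a one-direction transfer.  When the target is
 * direct RAM the returned pointer aliases guest memory; otherwise it points
 * into a bounce buffer that address_space_unmap() releases (and, for writes,
 * flushes back).  example_read_guest() is a hypothetical helper.
 */
static bool example_read_guest(AddressSpace *as, hwaddr addr,
                               void *dest, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, false,
                                MEMTXATTRS_UNSPECIFIED);

    if (!p || plen < len) {
        /* Out of bounce buffer space, or the mapping was truncated. */
        if (p) {
            address_space_unmap(as, p, plen, false, 0);
        }
        return false;
    }
    memcpy(dest, p, len);
    /* access_len tells unmap how much was actually consumed. */
    address_space_unmap(as, p, plen, false, len);
    return true;
}
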
3331 void *cpu_physical_memory_map(hwaddr addr,
3332                               hwaddr *plen,
3333                               bool is_write)
3334 {
3335     return address_space_map(&address_space_memory, addr, plen, is_write,
3336                              MEMTXATTRS_UNSPECIFIED);
3337 }
3338 
3339 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3340                                bool is_write, hwaddr access_len)
3341 {
3342     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3343 }
3344 
3345 #define ARG1_DECL                AddressSpace *as
3346 #define ARG1                     as
3347 #define SUFFIX
3348 #define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
3349 #define RCU_READ_LOCK(...)       rcu_read_lock()
3350 #define RCU_READ_UNLOCK(...)     rcu_read_unlock()
3351 #include "memory_ldst.c.inc"
3352 
3353 int64_t address_space_cache_init(MemoryRegionCache *cache,
3354                                  AddressSpace *as,
3355                                  hwaddr addr,
3356                                  hwaddr len,
3357                                  bool is_write)
3358 {
3359     AddressSpaceDispatch *d;
3360     hwaddr l;
3361     MemoryRegion *mr;
3362     Int128 diff;
3363 
3364     assert(len > 0);
3365 
3366     l = len;
3367     cache->fv = address_space_get_flatview(as);
3368     d = flatview_to_dispatch(cache->fv);
3369     cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
3370 
3371     /*
3372      * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
3373      * Take that into account to compute how many bytes are there between
3374      * cache->xlat and the end of the section.
3375      */
3376     diff = int128_sub(cache->mrs.size,
3377                       int128_make64(cache->xlat - cache->mrs.offset_within_region));
3378     l = int128_get64(int128_min(diff, int128_make64(l)));
3379 
3380     mr = cache->mrs.mr;
3381     memory_region_ref(mr);
3382     if (memory_access_is_direct(mr, is_write)) {
3383         /* We don't care about the memory attributes here as we're only
3384          * doing this if we found actual RAM, which behaves the same
3385          * regardless of attributes; so UNSPECIFIED is fine.
3386          */
3387         l = flatview_extend_translation(cache->fv, addr, len, mr,
3388                                         cache->xlat, l, is_write,
3389                                         MEMTXATTRS_UNSPECIFIED);
3390         cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true,
3391                                          is_write);
3392     } else {
3393         cache->ptr = NULL;
3394     }
3395 
3396     cache->len = l;
3397     cache->is_write = is_write;
3398     return l;
3399 }
3400 
3401 void address_space_cache_invalidate(MemoryRegionCache *cache,
3402                                     hwaddr addr,
3403                                     hwaddr access_len)
3404 {
3405     assert(cache->is_write);
3406     if (likely(cache->ptr)) {
3407         invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
3408     }
3409 }
3410 
3411 void address_space_cache_destroy(MemoryRegionCache *cache)
3412 {
3413     if (!cache->mrs.mr) {
3414         return;
3415     }
3416 
3417     if (xen_enabled()) {
3418         xen_invalidate_map_cache_entry(cache->ptr);
3419     }
3420     memory_region_unref(cache->mrs.mr);
3421     flatview_unref(cache->fv);
3422     cache->mrs.mr = NULL;
3423     cache->fv = NULL;
3424 }
3425 
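/*
 * Illustrative sketch (editor's addition): the typical lifecycle of a
 * MemoryRegionCache for a guest-resident structure such as a ring entry.
 * address_space_read_cached()/address_space_write_cached() (declared in
 * exec/memory.h) take the fast path while cache->ptr is non-NULL and fall
 * back to the _cached_slow variants below otherwise.  The function name,
 * the 16-byte entry layout and the flag update are all hypothetical, and
 * endianness handling is omitted for brevity.
 */
static void example_update_entry(AddressSpace *as, hwaddr table, int idx)
{
    MemoryRegionCache cache;
    uint64_t entry[2];

    /* Cache 16 bytes for read/write; the return value is the cached length. */
    if (address_space_cache_init(&cache, as, table + idx * 16, 16, true) < 16) {
        address_space_cache_destroy(&cache);
        return;
    }
    address_space_read_cached(&cache, 0, entry, sizeof(entry));
    entry[1] |= 1;
    address_space_write_cached(&cache, 0, entry, sizeof(entry));
    /* Make the write visible and mark the underlying RAM dirty. */
    address_space_cache_invalidate(&cache, 0, sizeof(entry));
    address_space_cache_destroy(&cache);
}
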
3426 /* Called from RCU critical section.  This function has the same
3427  * semantics as address_space_translate, but it only works on a
3428  * predefined range of a MemoryRegion that was mapped with
3429  * address_space_cache_init.
3430  */
3431 static inline MemoryRegion *address_space_translate_cached(
3432     MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
3433     hwaddr *plen, bool is_write, MemTxAttrs attrs)
3434 {
3435     MemoryRegionSection section;
3436     MemoryRegion *mr;
3437     IOMMUMemoryRegion *iommu_mr;
3438     AddressSpace *target_as;
3439 
3440     assert(!cache->ptr);
3441     *xlat = addr + cache->xlat;
3442 
3443     mr = cache->mrs.mr;
3444     iommu_mr = memory_region_get_iommu(mr);
3445     if (!iommu_mr) {
3446         /* MMIO region.  */
3447         return mr;
3448     }
3449 
3450     section = address_space_translate_iommu(iommu_mr, xlat, plen,
3451                                             NULL, is_write, true,
3452                                             &target_as, attrs);
3453     return section.mr;
3454 }
3455 
3456 /* Called within RCU critical section.  */
3457 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs,
3458                                                        const void *ptr,
3459                                                        hwaddr len,
3460                                                        hwaddr mr_addr,
3461                                                        hwaddr l,
3462                                                        MemoryRegion *mr)
3463 {
3464     MemTxResult result = MEMTX_OK;
3465     const uint8_t *buf = ptr;
3466 
3467     for (;;) {
3468         result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l,
3469                                                mr);
3470 
3471         len -= l;
3472         buf += l;
3473         mr_addr += l;
3474 
3475         if (!len) {
3476             break;
3477         }
3478 
3479         l = len;
3480     }
3481 
3482     return result;
3483 }
3484 
3485 /* Called within RCU critical section.  */
3486 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs,
3487                                                       void *ptr, hwaddr len,
3488                                                       hwaddr mr_addr, hwaddr l,
3489                                                       MemoryRegion *mr)
3490 {
3491     MemTxResult result = MEMTX_OK;
3492     uint8_t *buf = ptr;
3493 
3494     for (;;) {
3495         result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr);
3496         len -= l;
3497         buf += l;
3498         mr_addr += l;
3499 
3500         if (!len) {
3501             break;
3502         }
3503         l = len;
3504     }
3505 
3506     return result;
3507 }
3508 
3509 /* Called from RCU critical section. address_space_read_cached uses this
3510  * out of line function when the target is an MMIO or IOMMU region.
3511  */
3512 MemTxResult
3513 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3514                                    void *buf, hwaddr len)
3515 {
3516     hwaddr mr_addr, l;
3517     MemoryRegion *mr;
3518 
3519     l = len;
3520     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false,
3521                                         MEMTXATTRS_UNSPECIFIED);
3522     return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED,
3523                                               buf, len, mr_addr, l, mr);
3524 }
3525 
3526 /* Called from RCU critical section. address_space_write_cached uses this
3527  * out of line function when the target is an MMIO or IOMMU region.
3528  */
3529 MemTxResult
3530 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
3531                                     const void *buf, hwaddr len)
3532 {
3533     hwaddr mr_addr, l;
3534     MemoryRegion *mr;
3535 
3536     l = len;
3537     mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true,
3538                                         MEMTXATTRS_UNSPECIFIED);
3539     return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED,
3540                                                buf, len, mr_addr, l, mr);
3541 }
3542 
3543 #define ARG1_DECL                MemoryRegionCache *cache
3544 #define ARG1                     cache
3545 #define SUFFIX                   _cached_slow
3546 #define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
3547 #define RCU_READ_LOCK()          ((void)0)
3548 #define RCU_READ_UNLOCK()        ((void)0)
3549 #include "memory_ldst.c.inc"
3550 
3551 /* virtual memory access for debug (includes writing to ROM) */
3552 int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
3553                         void *ptr, size_t len, bool is_write)
3554 {
3555     hwaddr phys_addr;
3556     vaddr l, page;
3557     uint8_t *buf = ptr;
3558 
3559     cpu_synchronize_state(cpu);
3560     while (len > 0) {
3561         int asidx;
3562         MemTxAttrs attrs;
3563         MemTxResult res;
3564 
3565         page = addr & TARGET_PAGE_MASK;
3566         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3567         asidx = cpu_asidx_from_attrs(cpu, attrs);
3568         /* if no physical page mapped, return an error */
3569         if (phys_addr == -1)
3570             return -1;
3571         l = (page + TARGET_PAGE_SIZE) - addr;
3572         if (l > len)
3573             l = len;
3574         phys_addr += (addr & ~TARGET_PAGE_MASK);
3575         if (is_write) {
3576             res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
3577                                           attrs, buf, l);
3578         } else {
3579             res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
3580                                      attrs, buf, l);
3581         }
3582         if (res != MEMTX_OK) {
3583             return -1;
3584         }
3585         len -= l;
3586         buf += l;
3587         addr += l;
3588     }
3589     return 0;
3590 }
3591 
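/*
 * Illustrative sketch (editor's addition): cpu_memory_rw_debug() is the
 * interface the gdbstub and monitor use to access guest virtual memory
 * outside the normal TLB path.  example_peek_guest_u32() is a hypothetical
 * helper; the little-endian interpretation is arbitrary and would depend on
 * the target.
 */
static bool example_peek_guest_u32(CPUState *cpu, vaddr addr, uint32_t *val)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), false) != 0) {
        return false;               /* no physical page mapped there */
    }
    *val = ldl_le_p(buf);
    return true;
}
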
3592 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3593 {
3594     MemoryRegion *mr;
3595     hwaddr l = 1;
3596 
3597     RCU_READ_LOCK_GUARD();
3598     mr = address_space_translate(&address_space_memory,
3599                                  phys_addr, &phys_addr, &l, false,
3600                                  MEMTXATTRS_UNSPECIFIED);
3601 
3602     return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3603 }
3604 
3605 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3606 {
3607     RAMBlock *block;
3608     int ret = 0;
3609 
3610     RCU_READ_LOCK_GUARD();
3611     RAMBLOCK_FOREACH(block) {
3612         ret = func(block, opaque);
3613         if (ret) {
3614             break;
3615         }
3616     }
3617     return ret;
3618 }
3619 
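/*
 * Illustrative sketch (editor's addition): walking all RAM blocks with
 * qemu_ram_foreach_block().  Returning 0 from the callback continues the
 * walk; any non-zero value stops it and is propagated to the caller.
 * example_total_ram()/example_count_ram_cb() are hypothetical, and
 * qemu_ram_get_used_length() is assumed as the accessor for a block's
 * used size.
 */
static int example_count_ram_cb(RAMBlock *rb, void *opaque)
{
    uint64_t *total = opaque;

    *total += qemu_ram_get_used_length(rb);
    return 0;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram_cb, &total);
    return total;
}
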
3620 /*
3621  * Unmap pages of memory from start to start+length such that
3622  * they a) read as 0, b) Trigger whatever fault mechanism
3623  * the OS provides for postcopy.
3624  * The pages must be unmapped by the end of the function.
3625  * Returns: 0 on success, none-0 on failure
3626  *
3627  */
3628 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3629 {
3630     int ret = -1;
3631 
3632     uint8_t *host_startaddr = rb->host + start;
3633 
3634     if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3635         error_report("%s: Unaligned start address: %p",
3636                      __func__, host_startaddr);
3637         goto err;
3638     }
3639 
3640     if ((start + length) <= rb->max_length) {
3641         bool need_madvise, need_fallocate;
3642         if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3643             error_report("%s: Unaligned length: %zx", __func__, length);
3644             goto err;
3645         }
3646 
3647         errno = ENOTSUP; /* If we are missing MADVISE etc */
3648 
3649         /* The logic here is messy;
3650          *    madvise DONTNEED fails for hugepages
3651          *    fallocate works on hugepages and shmem
3652          *    shared anonymous memory requires madvise REMOVE
3653          */
3654         need_madvise = (rb->page_size == qemu_real_host_page_size());
3655         need_fallocate = rb->fd != -1;
3656         if (need_fallocate) {
3657             /* For a file, this causes the area of the file to be zeroed
3658              * if read, and for hugetlbfs also causes it to be unmapped
3659              * so a userfault will trigger.
3660              */
3661 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3662             /*
3663              * fallocate() will fail with readonly files. Let's print a
3664              * proper error message.
3665              */
3666             if (rb->flags & RAM_READONLY_FD) {
3667                 error_report("%s: Discarding RAM with readonly files is not"
3668                              " supported", __func__);
3669                 goto err;
3670 
3671             }
3672             /*
3673              * We'll discard data from the actual file, even though we only
3674              * have a MAP_PRIVATE mapping, possibly messing with other
3675              * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
3676              * change that behavior whithout violating the promised
3677              * change that behavior without violating the promised
3678              *
3679              * Only warn, because it works as long as nobody else uses that
3680              * file.
3681              */
3682             if (!qemu_ram_is_shared(rb)) {
3683                 warn_report_once("%s: Discarding RAM"
3684                                  " in private file mappings is possibly"
3685                                  " dangerous, because it will modify the"
3686                                  " underlying file and will affect other"
3687                                  " users of the file", __func__);
3688             }
3689 
3690             ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3691                             start, length);
3692             if (ret) {
3693                 ret = -errno;
3694                 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3695                              __func__, rb->idstr, start, length, ret);
3696                 goto err;
3697             }
3698 #else
3699             ret = -ENOSYS;
3700             error_report("%s: fallocate not available/file"
3701                          " %s:%" PRIx64 " +%zx (%d)",
3702                          __func__, rb->idstr, start, length, ret);
3703             goto err;
3704 #endif
3705         }
3706         if (need_madvise) {
3707             /* For normal RAM this causes it to be unmapped,
3708              * for shared memory it causes the local mapping to disappear
3709              * and to fall back on the file contents (which we just
3710              * fallocate'd away).
3711              */
3712 #if defined(CONFIG_MADVISE)
3713             if (qemu_ram_is_shared(rb) && rb->fd < 0) {
3714                 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE);
3715             } else {
3716                 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED);
3717             }
3718             if (ret) {
3719                 ret = -errno;
3720                 error_report("%s: Failed to discard range "
3721                              "%s:%" PRIx64 " +%zx (%d)",
3722                              __func__, rb->idstr, start, length, ret);
3723                 goto err;
3724             }
3725 #else
3726             ret = -ENOSYS;
3727             error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
3728                          __func__, rb->idstr, start, length, ret);
3729             goto err;
3730 #endif
3731         }
3732         trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3733                                       need_madvise, need_fallocate, ret);
3734     } else {
3735         error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT ")",
3736                      __func__, rb->idstr, start, length, rb->max_length);
3737     }
3738 
3739 err:
3740     return ret;
3741 }
3742 
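/*
 * Illustrative sketch (editor's addition): a caller such as postcopy or
 * free-page-hinting code dropping a page-aligned range of a RAMBlock so it
 * reads back as zero and, where supported, faults again on next access.
 * example_drop_pages() is hypothetical; offsets are relative to the start
 * of the block, as ram_block_discard_range() expects.
 */
static void example_drop_pages(RAMBlock *rb, uint64_t offset, size_t size)
{
    /* Both offset and size must be multiples of the block's page size. */
    assert(QEMU_IS_ALIGNED(offset, qemu_ram_pagesize(rb)));
    assert(QEMU_IS_ALIGNED(size, qemu_ram_pagesize(rb)));

    if (ram_block_discard_range(rb, offset, size)) {
        warn_report("discarding %zu bytes at offset 0x%" PRIx64 " failed",
                    size, offset);
    }
}
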
3743 int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
3744                                         size_t length)
3745 {
3746     int ret = -1;
3747 
3748 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3749     ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3750                     start, length);
3751 
3752     if (ret) {
3753         ret = -errno;
3754         error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3755                      __func__, rb->idstr, start, length, ret);
3756     }
3757 #else
3758     ret = -ENOSYS;
3759     error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
3760                  __func__, rb->idstr, start, length, ret);
3761 #endif
3762 
3763     return ret;
3764 }
3765 
3766 bool ramblock_is_pmem(RAMBlock *rb)
3767 {
3768     return rb->flags & RAM_PMEM;
3769 }
3770 
3771 static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
3772 {
3773     if (start == end - 1) {
3774         qemu_printf("\t%3d      ", start);
3775     } else {
3776         qemu_printf("\t%3d..%-3d ", start, end - 1);
3777     }
3778     qemu_printf(" skip=%d ", skip);
3779     if (ptr == PHYS_MAP_NODE_NIL) {
3780         qemu_printf(" ptr=NIL");
3781     } else if (!skip) {
3782         qemu_printf(" ptr=#%d", ptr);
3783     } else {
3784         qemu_printf(" ptr=[%d]", ptr);
3785     }
3786     qemu_printf("\n");
3787 }
3788 
3789 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3790                            int128_sub((size), int128_one())) : 0)
3791 
3792 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
3793 {
3794     int i;
3795 
3796     qemu_printf("  Dispatch\n");
3797     qemu_printf("    Physical sections\n");
3798 
3799     for (i = 0; i < d->map.sections_nb; ++i) {
3800         MemoryRegionSection *s = d->map.sections + i;
3801         const char *names[] = { " [unassigned]", " [not dirty]",
3802                                 " [ROM]", " [watch]" };
3803 
3804         qemu_printf("      #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx
3805                     " %s%s%s%s%s",
3806             i,
3807             s->offset_within_address_space,
3808             s->offset_within_address_space + MR_SIZE(s->size),
3809             s->mr->name ? s->mr->name : "(noname)",
3810             i < ARRAY_SIZE(names) ? names[i] : "",
3811             s->mr == root ? " [ROOT]" : "",
3812             s == d->mru_section ? " [MRU]" : "",
3813             s->mr->is_iommu ? " [iommu]" : "");
3814 
3815         if (s->mr->alias) {
3816             qemu_printf(" alias=%s", s->mr->alias->name ?
3817                     s->mr->alias->name : "noname");
3818         }
3819         qemu_printf("\n");
3820     }
3821 
3822     qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
3823                P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
3824     for (i = 0; i < d->map.nodes_nb; ++i) {
3825         int j, jprev;
3826         PhysPageEntry prev;
3827         Node *n = d->map.nodes + i;
3828 
3829         qemu_printf("      [%d]\n", i);
3830 
3831         for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
3832             PhysPageEntry *pe = *n + j;
3833 
3834             if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
3835                 continue;
3836             }
3837 
3838             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
3839 
3840             jprev = j;
3841             prev = *pe;
3842         }
3843 
3844         if (jprev != ARRAY_SIZE(*n)) {
3845             mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
3846         }
3847     }
3848 }
3849 
3850 /* Require any discards to work. */
3851 static unsigned int ram_block_discard_required_cnt;
3852 /* Require only coordinated discards to work. */
3853 static unsigned int ram_block_coordinated_discard_required_cnt;
3854 /* Disable any discards. */
3855 static unsigned int ram_block_discard_disabled_cnt;
3856 /* Disable only uncoordinated discards. */
3857 static unsigned int ram_block_uncoordinated_discard_disabled_cnt;
3858 static QemuMutex ram_block_discard_disable_mutex;
3859 
3860 static void ram_block_discard_disable_mutex_lock(void)
3861 {
3862     static gsize initialized;
3863 
3864     if (g_once_init_enter(&initialized)) {
3865         qemu_mutex_init(&ram_block_discard_disable_mutex);
3866         g_once_init_leave(&initialized, 1);
3867     }
3868     qemu_mutex_lock(&ram_block_discard_disable_mutex);
3869 }
3870 
3871 static void ram_block_discard_disable_mutex_unlock(void)
3872 {
3873     qemu_mutex_unlock(&ram_block_discard_disable_mutex);
3874 }
3875 
3876 int ram_block_discard_disable(bool state)
3877 {
3878     int ret = 0;
3879 
3880     ram_block_discard_disable_mutex_lock();
3881     if (!state) {
3882         ram_block_discard_disabled_cnt--;
3883     } else if (ram_block_discard_required_cnt ||
3884                ram_block_coordinated_discard_required_cnt) {
3885         ret = -EBUSY;
3886     } else {
3887         ram_block_discard_disabled_cnt++;
3888     }
3889     ram_block_discard_disable_mutex_unlock();
3890     return ret;
3891 }
3892 
3893 int ram_block_uncoordinated_discard_disable(bool state)
3894 {
3895     int ret = 0;
3896 
3897     ram_block_discard_disable_mutex_lock();
3898     if (!state) {
3899         ram_block_uncoordinated_discard_disabled_cnt--;
3900     } else if (ram_block_discard_required_cnt) {
3901         ret = -EBUSY;
3902     } else {
3903         ram_block_uncoordinated_discard_disabled_cnt++;
3904     }
3905     ram_block_discard_disable_mutex_unlock();
3906     return ret;
3907 }
3908 
3909 int ram_block_discard_require(bool state)
3910 {
3911     int ret = 0;
3912 
3913     ram_block_discard_disable_mutex_lock();
3914     if (!state) {
3915         ram_block_discard_required_cnt--;
3916     } else if (ram_block_discard_disabled_cnt ||
3917                ram_block_uncoordinated_discard_disabled_cnt) {
3918         ret = -EBUSY;
3919     } else {
3920         ram_block_discard_required_cnt++;
3921     }
3922     ram_block_discard_disable_mutex_unlock();
3923     return ret;
3924 }
3925 
3926 int ram_block_coordinated_discard_require(bool state)
3927 {
3928     int ret = 0;
3929 
3930     ram_block_discard_disable_mutex_lock();
3931     if (!state) {
3932         ram_block_coordinated_discard_required_cnt--;
3933     } else if (ram_block_discard_disabled_cnt) {
3934         ret = -EBUSY;
3935     } else {
3936         ram_block_coordinated_discard_required_cnt++;
3937     }
3938     ram_block_discard_disable_mutex_unlock();
3939     return ret;
3940 }
3941 
3942 bool ram_block_discard_is_disabled(void)
3943 {
3944     return qatomic_read(&ram_block_discard_disabled_cnt) ||
3945            qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt);
3946 }
3947 
3948 bool ram_block_discard_is_required(void)
3949 {
3950     return qatomic_read(&ram_block_discard_required_cnt) ||
3951            qatomic_read(&ram_block_coordinated_discard_required_cnt);
3952 }
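
/*
 * Illustrative sketch (editor's addition): how the counters above are meant
 * to be used.  A component that pins all guest memory and therefore cannot
 * tolerate discards takes a "disable" reference for its lifetime, while a
 * feature that depends on discarding takes a "require" reference; the two
 * modes exclude each other.  example_pin_device_realize()/_unrealize() are
 * hypothetical.
 */
static bool example_pin_device_realize(Error **errp)
{
    if (ram_block_discard_disable(true)) {
        error_setg(errp, "RAM discarding is in use; cannot pin all memory");
        return false;
    }
    /* ... pin guest memory ... */
    return true;
}

static void example_pin_device_unrealize(void)
{
    /* ... unpin guest memory ... */
    ram_block_discard_disable(false);
}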
3953