/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef SYSTEM_MEMORY_H
#define SYSTEM_MEMORY_H

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "exec/tswap.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because the dirty rate is being measured */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because of the dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
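/* For example, IOMMU_ACCESS_FLAG(true, true) evaluates to IOMMU_RW. */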

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
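/*
 * Illustrative note (a sketch, not a declaration from this header):
 * @addr_mask covers the in-page offset bits, so a caller composes the
 * output address for an input address @iova as:
 *
 *     hwaddr xlat = (entry.translated_addr & ~entry.addr_mask) |
 *                   (iova & entry.addr_mask);
 *
 * With addr_mask == 0xfff (a 4k translation), the low 12 bits of @iova
 * pass through unchanged.
 */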

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 *   (1) When the device needs accurate synchronizations of the vIOMMU page
 *       tables, it needs to register with both MAP|UNMAP notifies (which
 *       is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *       Regarding accurate synchronization: this is needed when the notified
 *       device maintains a shadow page table and must be notified on each
 *       guest MAP (page table entry creation) and UNMAP (invalidation)
 *       event (e.g. VFIO). Both notifications must be accurate so that
 *       the shadow page table is fully in sync with the guest view.
 *
 *   (2) When the device doesn't need accurate synchronizations of the
 *       vIOMMU page tables, it needs to register only with UNMAP or
 *       DEVIOTLB_UNMAP notifies.
 *
 *       This is the case when the device maintains a cache of IOMMU
 *       translations (IOTLB) and is able to fill that cache by requesting
 *       translations from the vIOMMU through a protocol similar to ATS
 *       (Address Translation Service).
 *
 *       Note that in this mode the vIOMMU will not maintain a shadowed
 *       page table for the address space, and the UNMAP messages can cover
 *       more than the pages that used to be mapped. The notified device
 *       must be able to handle such over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)


/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: QEMU will bail out if that is not
 * supported. When not set, the OS will do the reservation, if supported
 * for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM that can be private, backed by a KVM guest memfd */
#define RAM_GUEST_MEMFD   (1 << 12)

/*
 * In RAMBlock creation functions, if MAP_SHARED is 0 in the flags parameter,
 * the implementation may still create a shared mapping if other conditions
 * require it. Callers who specifically want a private mapping, e.g. objects
 * specified by the user, must pass RAM_PRIVATE.
 * After RAMBlock creation, MAP_SHARED in the block's flags indicates whether
 * the block is shared or private, and MAP_PRIVATE is omitted.
 */
#define RAM_PRIVATE (1 << 13)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
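/*
 * Illustrative sketch (my_unmap_notify() is hypothetical): a device that
 * only caches translations could register an UNMAP-only notifier covering
 * the whole address range at IOMMU index 0, pairing the initializer above
 * with memory_region_register_iommu_notifier(), declared later in this
 * header. The notifier body would drop any cached translation overlapping
 * [entry->iova, entry->iova + entry->addr_mask]:
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         ...
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                           &n, &error_fatal);
 */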

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
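/*
 * Illustrative sketch (MyDevState, mydev_read()/mydev_write() and the
 * 4-byte register layout are hypothetical, not part of this header): a
 * typical MMIO ops table restricts both guest-visible and implementation
 * access sizes so the core splits or rejects everything else:
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 */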

typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when the IOMMU Notifier flags change (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, an error must be set
     * in @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
};

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
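/*
 * Illustrative sketch (my_populate_cb()/my_discard_cb() are hypothetical
 * NotifyRamPopulate/NotifyRamDiscard implementations): a component that
 * must only map the currently populated parts would typically do:
 *
 *     RamDiscardListener rdl;
 *
 *     ram_discard_listener_init(&rdl, my_populate_cb, my_discard_cb, false);
 *     ram_discard_manager_register_listener(rdm, &rdl, &section);
 *     ...
 *     ram_discard_manager_unregister_listener(rdm, &rdl);
 *
 * using the wrappers declared below, with @rdm obtained from the RAM
 * #MemoryRegion.
 */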

typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory).
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

/**
 * memory_get_xlat_addr: Extract addresses from a TLB entry
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @vaddr: virtual address
 * @ram_addr: RAM address
 * @read_only: indicates if writes are allowed
 * @mr_has_discard_manager: indicates memory is controlled by a
 *                          RamDiscardManager
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old_val, int new_val);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old_val: A bitmap of dirty memory logging clients that were active in
     *           the previous transaction.
     * @new_val: A bitmap of dirty memory logging clients that are active in
     *           the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old_val, int new_val);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL, and vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: Whether this is the last stage of log synchronization
     *              during migration. The caller must guarantee that a
     *              synchronization with @last_stage set to true is triggered
     *              exactly once, after all VCPUs have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     * @errp: pointer to Error*, to store an error if it happens.
     *
     * Return: true on success, else false setting @errp with error.
     */
    bool (*log_global_start)(MemoryListener *listener, Error **errp);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Governs the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener. It can be used in contexts where we'd like to
     * distinguish one memory listener from the others.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
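/*
 * Illustrative sketch (my_region_add()/my_region_del() are hypothetical
 * callbacks): a component tracking topology changes fills in only the
 * callbacks it needs and registers against an address space with
 * memory_listener_register(), declared later in this header:
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-component",
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */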

typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /*
     * Maximum DMA bounce buffer size used for indirect memory map requests.
     * This limits the total size of bounce buffer allocations made for
     * DMA requests to indirect memory regions within this AddressSpace. DMA
     * requests that exceed the limit (e.g. due to overly large requested size
     * or concurrent DMA requests having claimed too much buffer space) will be
     * rejected and left to the caller to handle.
     */
    size_t max_bounce_buffer_size;
    /* Total size of bounce buffers currently allocated, atomically accessed */
    size_t bounce_buffer_size;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
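/*
 * Illustrative sketch (my_range_cb() is hypothetical): the FlatView is
 * reclaimed via RCU, so look it up and walk it inside an RCU critical
 * section (or while holding the BQL). Returning false from the callback
 * keeps the iteration going:
 *
 *     static bool my_range_cb(Int128 start, Int128 len,
 *                             const MemoryRegion *mr,
 *                             hwaddr offset_in_region, void *opaque)
 *     {
 *         return false;
 *     }
 *
 *     RCU_READ_LOCK_GUARD();
 *     flatview_for_each_range(address_space_to_flatview(as),
 *                             my_range_cb, NULL);
 */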

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
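/*
 * Illustrative sketch (the container layout and field names are
 * hypothetical): a pure container with two subregions mapped at fixed
 * offsets, using memory_region_add_subregion() declared later in this
 * header:
 *
 *     memory_region_init(&s->container, OBJECT(dev), "mydev", 0x10000);
 *     memory_region_add_subregion(&s->container, 0x0000, &s->regs_mr);
 *     memory_region_add_subregion(&s->container, 0x8000, &s->sram_mr);
 */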

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
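/*
 * Illustrative sketch, continuing the hypothetical mydev_ops example
 * above: create a 4KiB MMIO region backed by those callbacks and let a
 * sysbus device expose it (sysbus_init_mmio() is from hw/sysbus.h and is
 * just one way to map it; memory_region_add_subregion() works too):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(dev), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
 */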

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate:  Initialize RAM memory region.
 *                                          Accesses into the region will
 *                                          modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
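/*
 * Illustrative sketch (region names and addresses are hypothetical):
 * expose the second half of a 64KiB RAM region at another address via
 * an alias:
 *
 *     memory_region_init_alias(&s->alias, OBJECT(dev), "ram-hi",
 *                              &s->ram, 0x8000, 0x8000);
 *     memory_region_add_subregion(sysmem, 0xffff8000, &s->alias);
 */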

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name; becomes part of the RAMBlock name used in the migration
 *        stream and must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);
1546
1547 /**
1548 * memory_region_init_iommu: Initialize a memory region of a custom type
1549 * that translates addresses
1550 *
1551 * An IOMMU region translates addresses and forwards accesses to a target
1552 * memory region.
1553 *
1554 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1555 * @_iommu_mr should be a pointer to enough memory for an instance of
1556 * that subclass, @instance_size is the size of that subclass, and
1557 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1558 * instance of the subclass, and its methods will then be called to handle
1559 * accesses to the memory region. See the documentation of
1560 * #IOMMUMemoryRegionClass for further details.
1561 *
1562 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1563 * @instance_size: the IOMMUMemoryRegion subclass instance size
1564 * @mrtypename: the type name of the #IOMMUMemoryRegion
1565 * @owner: the object that tracks the region's reference count
1566 * @name: used for debugging; not visible to the user or ABI
1567 * @size: size of the region.
1568 */
1569 void memory_region_init_iommu(void *_iommu_mr,
1570 size_t instance_size,
1571 const char *mrtypename,
1572 Object *owner,
1573 const char *name,
1574 uint64_t size);
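
/*
 * Example (editor's sketch): embedding an IOMMU memory region in device
 * state. "MyIOMMUState" and the type name string are hypothetical; the
 * named type must be a registered subclass of TYPE_IOMMU_MEMORY_REGION
 * whose class implements the #IOMMUMemoryRegionClass methods.
 */
#if 0 /* illustrative only */
typedef struct MyIOMMUState {
    IOMMUMemoryRegion iommu_mr;
} MyIOMMUState;

static void my_iommu_init_mr(MyIOMMUState *s, Object *owner)
{
    memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
                             "my-iommu-memory-region", owner,
                             "my-iommu", UINT64_MAX);
}
#endif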
1575
1576 /**
1577 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
1578 * region will modify memory directly.
1579 *
1580 * @mr: the #MemoryRegion to be initialized
1581 * @owner: the object that tracks the region's reference count (must be
1582 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1583 * @name: name of the memory region
1584 * @size: size of the region in bytes
1585 * @errp: pointer to Error*, to store an error if it happens.
1586 *
1587 * This function allocates RAM for a board model or device, and
1588 * arranges for it to be migrated (by calling vmstate_register_ram()
1589 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1590 * @owner is NULL).
1591 *
1592 * TODO: Currently we restrict @owner to being either NULL (for
1593 * global RAM regions with no owner) or devices, so that we can
1594 * give the RAM block a unique name for migration purposes.
1595 * We should lift this restriction and allow arbitrary Objects.
1596 * If you pass a non-NULL non-device @owner then we will assert.
1597 *
1598 * Return: true on success, else false setting @errp with error.
1599 */
1600 bool memory_region_init_ram(MemoryRegion *mr,
1601 Object *owner,
1602 const char *name,
1603 uint64_t size,
1604 Error **errp);
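
/*
 * Example (editor's sketch): allocating and mapping RAM from a device's
 * realize method. "MyDevState" and the MY_DEV() cast, the region name and
 * the mapping address are hypothetical; get_system_memory() is assumed to
 * be visible to the caller.
 */
#if 0 /* illustrative only */
static void my_dev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = MY_DEV(dev);

    /* Owner is a device, so the RAM is registered for migration. */
    if (!memory_region_init_ram(&s->ram, OBJECT(dev), "my-dev.ram",
                                0x10000, errp)) {
        return;
    }
    memory_region_add_subregion(get_system_memory(), 0x10000000, &s->ram);
}
#endif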
1605
1606 bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
1607 Object *owner,
1608 const char *name,
1609 uint64_t size,
1610 Error **errp);
1611
1612 /**
1613 * memory_region_init_rom: Initialize a ROM memory region.
1614 *
1615 * This has the same effect as calling memory_region_init_ram()
1616 * and then marking the resulting region read-only with
1617 * memory_region_set_readonly(). This includes arranging for the
1618 * contents to be migrated.
1619 *
1620 * TODO: Currently we restrict @owner to being either NULL (for
1621 * global RAM regions with no owner) or devices, so that we can
1622 * give the RAM block a unique name for migration purposes.
1623 * We should lift this restriction and allow arbitrary Objects.
1624 * If you pass a non-NULL non-device @owner then we will assert.
1625 *
1626 * @mr: the #MemoryRegion to be initialized.
1627 * @owner: the object that tracks the region's reference count
1628 * @name: Region name; becomes part of the RAMBlock name used in the
1629 * migration stream. Must be unique within any device.
1630 * @size: size of the region.
1631 * @errp: pointer to Error*, to store an error if it happens.
1632 *
1633 * Return: true on success, else false setting @errp with error.
1634 */
1635 bool memory_region_init_rom(MemoryRegion *mr,
1636 Object *owner,
1637 const char *name,
1638 uint64_t size,
1639 Error **errp);
1640
1641 /**
1642 * memory_region_init_rom_device: Initialize a ROM memory region.
1643 * Writes are handled via callbacks.
1644 *
1645 * This function initializes a memory region backed by RAM for reads
1646 * and callbacks for writes, and arranges for the RAM backing to
1647 * be migrated (by calling vmstate_register_ram()
1648 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1649 * @owner is NULL).
1650 *
1651 * TODO: Currently we restrict @owner to being either NULL (for
1652 * global RAM regions with no owner) or devices, so that we can
1653 * give the RAM block a unique name for migration purposes.
1654 * We should lift this restriction and allow arbitrary Objects.
1655 * If you pass a non-NULL non-device @owner then we will assert.
1656 *
1657 * @mr: the #MemoryRegion to be initialized.
1658 * @owner: the object that tracks the region's reference count
1659 * @ops: callbacks for write access handling (must not be NULL).
1660 * @opaque: passed to the read and write callbacks of the @ops structure.
1661 * @name: Region name; becomes part of the RAMBlock name used in the
1662 * migration stream. Must be unique within any device.
1663 * @size: size of the region.
1664 * @errp: pointer to Error*, to store an error if it happens.
1665 *
1666 * Return: true on success, else false setting @errp with error.
1667 */
1668 bool memory_region_init_rom_device(MemoryRegion *mr,
1669 Object *owner,
1670 const MemoryRegionOps *ops,
1671 void *opaque,
1672 const char *name,
1673 uint64_t size,
1674 Error **errp);
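
/*
 * Example (editor's sketch): a flash-like ROM device. While in ROMD mode,
 * reads are satisfied directly from the RAM backing; every write goes
 * through the callback. "MyFlashState" and its fields are hypothetical.
 */
#if 0 /* illustrative only */
static void my_flash_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    MyFlashState *s = opaque;

    s->last_cmd = val;  /* e.g. decode a programming command */
}

static const MemoryRegionOps my_flash_ops = {
    /* A .read callback would also be needed if the device were ever
     * switched to MMIO mode via memory_region_rom_device_set_romd(). */
    .write = my_flash_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static bool my_flash_init_mr(MyFlashState *s, Error **errp)
{
    return memory_region_init_rom_device(&s->mem, OBJECT(s), &my_flash_ops,
                                         s, "my-flash", 0x100000, errp);
}
#endif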
1675
1676
1677 /**
1678 * memory_region_owner: get a memory region's owner.
1679 *
1680 * @mr: the memory region being queried.
1681 */
1682 Object *memory_region_owner(MemoryRegion *mr);
1683
1684 /**
1685 * memory_region_size: get a memory region's size.
1686 *
1687 * @mr: the memory region being queried.
1688 */
1689 uint64_t memory_region_size(MemoryRegion *mr);
1690
1691 /**
1692 * memory_region_is_ram: check whether a memory region is random access
1693 *
1694 * Returns %true if a memory region is random access.
1695 *
1696 * @mr: the memory region being queried
1697 */
1698 static inline bool memory_region_is_ram(MemoryRegion *mr)
1699 {
1700 return mr->ram;
1701 }
1702
1703 /**
1704 * memory_region_is_ram_device: check whether a memory region is a ram device
1705 *
1706 * Returns %true if a memory region is a device-backed RAM region
1707 *
1708 * @mr: the memory region being queried
1709 */
1710 bool memory_region_is_ram_device(MemoryRegion *mr);
1711
1712 /**
1713 * memory_region_is_romd: check whether a memory region is in ROMD mode
1714 *
1715 * Returns %true if a memory region is a ROM device and currently set to allow
1716 * direct reads.
1717 *
1718 * @mr: the memory region being queried
1719 */
1720 static inline bool memory_region_is_romd(MemoryRegion *mr)
1721 {
1722 return mr->rom_device && mr->romd_mode;
1723 }
1724
1725 /**
1726 * memory_region_is_protected: check whether a memory region is protected
1727 *
1728 * Returns %true if a memory region is protected RAM and cannot be accessed
1729 * via standard mechanisms, e.g. DMA.
1730 *
1731 * @mr: the memory region being queried
1732 */
1733 bool memory_region_is_protected(MemoryRegion *mr);
1734
1735 /**
1736 * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
1737 * associated
1738 *
1739 * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
1740 *
1741 * @mr: the memory region being queried
1742 */
1743 bool memory_region_has_guest_memfd(MemoryRegion *mr);
1744
1745 /**
1746 * memory_region_get_iommu: check whether a memory region is an iommu
1747 *
1748 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1749 * otherwise NULL.
1750 *
1751 * @mr: the memory region being queried
1752 */
1753 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1754 {
1755 if (mr->alias) {
1756 return memory_region_get_iommu(mr->alias);
1757 }
1758 if (mr->is_iommu) {
1759 return (IOMMUMemoryRegion *) mr;
1760 }
1761 return NULL;
1762 }
1763
1764 /**
1765 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1766 * if an iommu or NULL if not
1767 *
1768 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1769 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
1770 *
1771 * @iommu_mr: the memory region being queried
1772 */
1773 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1774 IOMMUMemoryRegion *iommu_mr)
1775 {
1776 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1777 }
1778
1779 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1780
1781 /**
1782 * memory_region_iommu_get_min_page_size: get minimum supported page size
1783 * for an iommu
1784 *
1785 * Returns minimum supported page size for an iommu.
1786 *
1787 * @iommu_mr: the memory region being queried
1788 */
1789 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1790
1791 /**
1792 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1793 *
1794 * Note: for any IOMMU implementation, an in-place mapping change
1795 * should be notified with an UNMAP followed by a MAP.
1796 *
1797 * @iommu_mr: the memory region that was changed
1798 * @iommu_idx: the IOMMU index for the translation table which has changed
1799 * @event: TLB event with the new entry in the IOMMU translation table.
1800 * The entry replaces all old entries for the same virtual I/O address
1801 * range.
1802 */
1803 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1804 int iommu_idx,
1805 const IOMMUTLBEvent event);
1806
1807 /**
1808 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1809 * entry to a single notifier
1810 *
1811 * This works just like memory_region_notify_iommu(), but it only
1812 * notifies a specific notifier, not all of them.
1813 *
1814 * @notifier: the notifier to be notified
1815 * @event: TLB event with the new entry in the IOMMU translation table.
1816 * The entry replaces all old entries for the same virtual I/O address
1817 * range.
1818 */
1819 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1820 const IOMMUTLBEvent *event);
1821
1822 /**
1823 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
1824 * translation that covers the
1825 * range of a notifier
1826 *
1827 * @notifier: the notifier to be notified
1828 */
1829 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
1830
1831
1832 /**
1833 * memory_region_register_iommu_notifier: register a notifier for changes to
1834 * IOMMU translation entries.
1835 *
1836 * Returns 0 on success, or a negative errno otherwise. In particular,
1837 * -EINVAL indicates that at least one of the attributes of the notifier
1838 * is not supported (flag/range) by the IOMMU memory region. In case of
1839 * error, an error object is also set in @errp.
1840 *
1841 * @mr: the memory region to observe
1842 * @n: the IOMMUNotifier to be added; the notify callback receives a
1843 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1844 * ceases to be valid on exit from the notifier.
1845 * @errp: pointer to Error*, to store an error if it happens.
1846 */
1847 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1848 IOMMUNotifier *n, Error **errp);
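
/*
 * Example (editor's sketch): registering a notifier for UNMAP events over
 * a whole IOMMU region. Assumes the iommu_notifier_init() helper from this
 * header; my_unmap_notify() is hypothetical.
 */
#if 0 /* illustrative only */
static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* React to the invalidated range [iova, iova + addr_mask]. */
}

static int my_register_notifier(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n,
                                Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);

    iommu_notifier_init(n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
                        0, memory_region_size(mr) - 1, 0);
    return memory_region_register_iommu_notifier(mr, n, errp);
}
#endif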
1849
1850 /**
1851 * memory_region_iommu_replay: replay existing IOMMU translations to
1852 * a notifier with the minimum page granularity returned by
1853 * memory_region_iommu_get_min_page_size().
1854 *
1855 * Note: this is not related to record-and-replay functionality.
1856 *
1857 * @iommu_mr: the memory region to observe
1858 * @n: the notifier to which to replay iommu mappings
1859 */
1860 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1861
1862 /**
1863 * memory_region_unregister_iommu_notifier: unregister a notifier for
1864 * changes to IOMMU translation entries.
1865 *
1866 * @mr: the memory region which was observed and for which notify_stopped()
1867 * needs to be called
1868 * @n: the notifier to be removed.
1869 */
1870 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1871 IOMMUNotifier *n);
1872
1873 /**
1874 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1875 * defined on the IOMMU.
1876 *
1877 * Returns 0 on success, or a negative errno otherwise. In particular,
1878 * -EINVAL indicates that the IOMMU does not support the requested
1879 * attribute.
1880 *
1881 * @iommu_mr: the memory region
1882 * @attr: the requested attribute
1883 * @data: a pointer to the requested attribute data
1884 */
1885 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1886 enum IOMMUMemoryRegionAttr attr,
1887 void *data);
1888
1889 /**
1890 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1891 * use for translations with the given memory transaction attributes.
1892 *
1893 * @iommu_mr: the memory region
1894 * @attrs: the memory transaction attributes
1895 */
1896 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1897 MemTxAttrs attrs);
1898
1899 /**
1900 * memory_region_iommu_num_indexes: return the total number of IOMMU
1901 * indexes that this IOMMU supports.
1902 *
1903 * @iommu_mr: the memory region
1904 */
1905 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1906
1907 /**
1908 * memory_region_name: get a memory region's name
1909 *
1910 * Returns the string that was used to initialize the memory region.
1911 *
1912 * @mr: the memory region being queried
1913 */
1914 const char *memory_region_name(const MemoryRegion *mr);
1915
1916 /**
1917 * memory_region_is_logging: return whether a memory region is logging writes
1918 *
1919 * Returns %true if the memory region is logging writes for the given client
1920 *
1921 * @mr: the memory region being queried
1922 * @client: the client being queried
1923 */
1924 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1925
1926 /**
1927 * memory_region_get_dirty_log_mask: return the clients for which a
1928 * memory region is logging writes.
1929 *
1930 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1931 * are the bit indices.
1932 *
1933 * @mr: the memory region being queried
1934 */
1935 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1936
1937 /**
1938 * memory_region_is_rom: check whether a memory region is ROM
1939 *
1940 * Returns %true if a memory region is read-only memory.
1941 *
1942 * @mr: the memory region being queried
1943 */
1944 static inline bool memory_region_is_rom(MemoryRegion *mr)
1945 {
1946 return mr->ram && mr->readonly;
1947 }
1948
1949 /**
1950 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1951 *
1952 * Returns %true if a memory region is non-volatile memory.
1953 *
1954 * @mr: the memory region being queried
1955 */
1956 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1957 {
1958 return mr->nonvolatile;
1959 }
1960
1961 /**
1962 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1963 *
1964 * Returns a file descriptor backing a file-based RAM memory region,
1965 * or -1 if the region is not a file-based RAM memory region.
1966 *
1967 * @mr: the RAM or alias memory region being queried.
1968 */
1969 int memory_region_get_fd(MemoryRegion *mr);
1970
1971 /**
1972 * memory_region_from_host: Convert a pointer into a RAM memory region
1973 * and an offset within it.
1974 *
1975 * Given a host pointer inside a RAM memory region (created with
1976 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1977 * the MemoryRegion and the offset within it.
1978 *
1979 * Use with care; by the time this function returns, the returned pointer is
1980 * not protected by RCU anymore. If the caller is not within an RCU critical
1981 * section and does not hold the BQL, it must have other means of
1982 * protecting the pointer, such as a reference to the region that includes
1983 * the incoming ram_addr_t.
1984 *
1985 * @ptr: the host pointer to be converted
1986 * @offset: the offset within memory region
1987 */
1988 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1989
1990 /**
1991 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1992 *
1993 * Returns a host pointer to a RAM memory region (created with
1994 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1995 *
1996 * Use with care; by the time this function returns, the returned pointer is
1997 * not protected by RCU anymore. If the caller is not within an RCU critical
1998 * section and does not hold the BQL, it must have other means of
1999 * protecting the pointer, such as a reference to the region that includes
2000 * the incoming ram_addr_t.
2001 *
2002 * @mr: the memory region being queried.
2003 */
2004 void *memory_region_get_ram_ptr(MemoryRegion *mr);
2005
2006 /* memory_region_ram_resize: Resize a RAM region.
2007 *
2008 * Resizing RAM while migrating can result in the migration being canceled.
2009 * Care has to be taken if the guest might have already detected the memory.
2010 *
2011 * @mr: a memory region created with @memory_region_init_resizeable_ram.
2012 * @newsize: the new size of the region
2013 * @errp: pointer to Error*, to store an error if it happens.
2014 */
2015 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
2016 Error **errp);
2017
2018 /**
2019 * memory_region_msync: Synchronize selected address range of
2020 * a memory mapped region
2021 *
2022 * @mr: the memory region to be synchronized
2023 * @addr: the initial address of the range to be synced
2024 * @size: the size of the range to be synced
2025 */
2026 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
2027
2028 /**
2029 * memory_region_writeback: Trigger cache writeback for
2030 * selected address range
2031 *
2032 * @mr: the memory region to be updated
2033 * @addr: the initial address of the range to be written back
2034 * @size: the size of the range to be written back
2035 */
2036 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
2037
2038 /**
2039 * memory_region_set_log: Turn dirty logging on or off for a region.
2040 *
2041 * Turns dirty logging on or off for a specified client (display, migration).
2042 * Only meaningful for RAM regions.
2043 *
2044 * @mr: the memory region being updated.
2045 * @log: whether dirty logging is to be enabled or disabled.
2046 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
2047 */
2048 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
2049
2050 /**
2051 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
2052 *
2053 * Marks a range of bytes as dirty, after it has been dirtied outside
2054 * guest code.
2055 *
2056 * @mr: the memory region being dirtied.
2057 * @addr: the address (relative to the start of the region) being dirtied.
2058 * @size: size of the range being dirtied.
2059 */
2060 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2061 hwaddr size);
2062
2063 /**
2064 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
2065 *
2066 * This function is called when the caller wants to clear the remote
2067 * dirty bitmap of a memory range within the memory region. This can
2068 * be used by e.g. KVM to manually clear dirty log when
2069 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
2070 * kernel.
2071 *
2072 * @mr: the memory region to clear the dirty log upon
2073 * @start: start address offset within the memory region
2074 * @len: length of the memory region to clear dirty bitmap
2075 */
2076 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2077 hwaddr len);
2078
2079 /**
2080 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
2081 * bitmap and clear it.
2082 *
2083 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
2084 * returns the snapshot. The snapshot can then be used to query dirty
2085 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
2086 * querying the same page multiple times, which is especially useful for
2087 * display updates where the scanlines often are not page aligned.
2088 *
2089 * The dirty bitmap region which gets copied into the snapshot (and
2090 * cleared afterwards) can be larger than requested. The boundaries
2091 * are rounded up/down so complete bitmap longs (covering 64 pages on
2092 * 64-bit hosts) can be copied over into the bitmap snapshot. This
2093 * isn't a problem for display updates as the extra pages are outside
2094 * the visible area, and in case the visible area changes a full
2095 * display redraw is due anyway. Should other use cases for this
2096 * function emerge we might have to revisit this implementation
2097 * detail.
2098 *
2099 * Use g_free to release DirtyBitmapSnapshot.
2100 *
2101 * @mr: the memory region being queried.
2102 * @addr: the address (relative to the start of the region) being queried.
2103 * @size: the size of the range being queried.
2104 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
2105 */
2106 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2107 hwaddr addr,
2108 hwaddr size,
2109 unsigned client);
2110
2111 /**
2112 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
2113 * in the specified dirty bitmap snapshot.
2114 *
2115 * @mr: the memory region being queried.
2116 * @snap: the dirty bitmap snapshot
2117 * @addr: the address (relative to the start of the region) being queried.
2118 * @size: the size of the range being queried.
2119 */
2120 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2121 DirtyBitmapSnapshot *snap,
2122 hwaddr addr, hwaddr size);
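
/*
 * Example (editor's sketch): a display update loop driven by a dirty
 * bitmap snapshot. The scanline geometry is hypothetical.
 */
#if 0 /* illustrative only */
static void my_display_update(MemoryRegion *vram, unsigned stride,
                              unsigned height)
{
    DirtyBitmapSnapshot *snap;
    unsigned y;

    snap = memory_region_snapshot_and_clear_dirty(vram, 0,
                                                  (hwaddr)stride * height,
                                                  DIRTY_MEMORY_VGA);
    for (y = 0; y < height; y++) {
        /* Scanlines need not be page aligned; the same page may be
         * queried repeatedly against the snapshot. */
        if (memory_region_snapshot_get_dirty(vram, snap,
                                             (hwaddr)y * stride, stride)) {
            /* redraw_scanline(y); */
        }
    }
    g_free(snap);
}
#endif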
2123
2124 /**
2125 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2126 * client.
2127 *
2128 * Marks a range of pages as no longer dirty.
2129 *
2130 * @mr: the region being updated.
2131 * @addr: the start of the subrange being cleaned.
2132 * @size: the size of the subrange being cleaned.
2133 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2134 * %DIRTY_MEMORY_VGA.
2135 */
2136 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2137 hwaddr size, unsigned client);
2138
2139 /**
2140 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2141 * TBs (for self-modifying code).
2142 *
2143 * The MemoryRegionOps->write() callback of a ROM device must use this function
2144 * to mark byte ranges that have been modified internally, such as by directly
2145 * accessing the memory returned by memory_region_get_ram_ptr().
2146 *
2147 * This function marks the range dirty and invalidates TBs so that TCG can
2148 * detect self-modifying code.
2149 *
2150 * @mr: the region being flushed.
2151 * @addr: the start, relative to the start of the region, of the range being
2152 * flushed.
2153 * @size: the size, in bytes, of the range being flushed.
2154 */
2155 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
2156
2157 /**
2158 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2159 *
2160 * Allows a memory region to be marked as read-only (turning it into a ROM).
2161 * Only useful on RAM regions.
2162 *
2163 * @mr: the region being updated.
2164 * @readonly: whether the region is to be ROM or RAM.
2165 */
2166 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2167
2168 /**
2169 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2170 *
2171 * Allows a memory region to be marked as non-volatile.
2172 * Only useful on RAM regions.
2173 *
2174 * @mr: the region being updated.
2175 * @nonvolatile: whether the region is to be non-volatile.
2176 */
2177 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2178
2179 /**
2180 * memory_region_rom_device_set_romd: enable/disable ROMD mode
2181 *
2182 * Allows a ROM device (initialized with memory_region_init_rom_device())
2183 * to be set to ROMD mode (default) or MMIO mode. When in ROMD mode, the
2184 * device is mapped to guest memory and satisfies read access directly.
2185 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2186 * Writes are always handled by the #MemoryRegion.write function.
2187 *
2188 * @mr: the memory region to be updated
2189 * @romd_mode: %true to put the region into ROMD mode
2190 */
2191 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2192
2193 /**
2194 * memory_region_set_coalescing: Enable memory coalescing for the region.
2195 *
2196 * Enables writes to a region to be queued for later processing. MMIO ->write
2197 * callbacks may be delayed until a non-coalesced MMIO is issued.
2198 * Only useful for IO regions. Roughly similar to write-combining hardware.
2199 *
2200 * @mr: the memory region to be write coalesced
2201 */
2202 void memory_region_set_coalescing(MemoryRegion *mr);
2203
2204 /**
2205 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2206 * a region.
2207 *
2208 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2209 * Multiple calls can be issued to coalesce disjoint ranges.
2210 *
2211 * @mr: the memory region to be updated.
2212 * @offset: the start of the range within the region to be coalesced.
2213 * @size: the size of the subrange to be coalesced.
2214 */
2215 void memory_region_add_coalescing(MemoryRegion *mr,
2216 hwaddr offset,
2217 uint64_t size);
2218
2219 /**
2220 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2221 *
2222 * Disables any coalescing caused by memory_region_set_coalescing() or
2223 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
2224 * hardware.
2225 *
2226 * @mr: the memory region to be updated.
2227 */
2228 void memory_region_clear_coalescing(MemoryRegion *mr);
2229
2230 /**
2231 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2232 * accesses.
2233 *
2234 * Ensure that pending coalesced MMIO requests are flushed before the memory
2235 * region is accessed. This property is automatically enabled for all regions
2236 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2237 *
2238 * @mr: the memory region to be updated.
2239 */
2240 void memory_region_set_flush_coalesced(MemoryRegion *mr);
2241
2242 /**
2243 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2244 * accesses.
2245 *
2246 * Clear the automatic coalesced MMIO flushing enabled via
2247 * memory_region_set_flush_coalesced. Note that this service has no effect on
2248 * memory regions that have MMIO coalescing enabled for themselves. For them,
2249 * automatic flushing will stop once coalescing is disabled.
2250 *
2251 * @mr: the memory region to be updated.
2252 */
2253 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2254
2255 /**
2256 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2257 * is written to a location.
2258 *
2259 * Marks a word in an IO region (initialized with memory_region_init_io())
2260 * as a trigger for an eventfd event. The I/O callback will not be called.
2261 * The caller must be prepared to handle failure (that is, take the required
2262 * action if the callback _is_ called).
2263 *
2264 * @mr: the memory region being updated.
2265 * @addr: the address within @mr that is to be monitored
2266 * @size: the size of the access to trigger the eventfd
2267 * @match_data: whether to match against @data, instead of just @addr
2268 * @data: the data to match against the guest write
2269 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2270 **/
2271 void memory_region_add_eventfd(MemoryRegion *mr,
2272 hwaddr addr,
2273 unsigned size,
2274 bool match_data,
2275 uint64_t data,
2276 EventNotifier *e);
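
/*
 * Example (editor's sketch): wiring a doorbell register to an eventfd so
 * that a 4-byte guest write of the value 1 at offset 0x40 wakes a waiter
 * without taking the MMIO callback path. Assumes event_notifier_init()
 * from "qemu/event_notifier.h"; the offset and value are hypothetical.
 */
#if 0 /* illustrative only */
static void my_dev_arm_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    event_notifier_init(e, 0);
    memory_region_add_eventfd(mmio, 0x40, 4, true, 1, e);
}
#endif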
2277
2278 /**
2279 * memory_region_del_eventfd: Cancel an eventfd.
2280 *
2281 * Cancels an eventfd trigger requested by a previous
2282 * memory_region_add_eventfd() call.
2283 *
2284 * @mr: the memory region being updated.
2285 * @addr: the address within @mr that is to be monitored
2286 * @size: the size of the access to trigger the eventfd
2287 * @match_data: whether to match against @data, instead of just @addr
2288 * @data: the data to match against the guest write
2289 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2290 */
2291 void memory_region_del_eventfd(MemoryRegion *mr,
2292 hwaddr addr,
2293 unsigned size,
2294 bool match_data,
2295 uint64_t data,
2296 EventNotifier *e);
2297
2298 /**
2299 * memory_region_add_subregion: Add a subregion to a container.
2300 *
2301 * Adds a subregion at @offset. The subregion may not overlap with other
2302 * subregions (except for those explicitly marked as overlapping). A region
2303 * may only be added once as a subregion (unless removed with
2304 * memory_region_del_subregion()); use memory_region_init_alias() if you
2305 * want a region to be a subregion in multiple locations.
2306 *
2307 * @mr: the region to contain the new subregion; must be a container
2308 * initialized with memory_region_init().
2309 * @offset: the offset relative to @mr where @subregion is added.
2310 * @subregion: the subregion to be added.
2311 */
2312 void memory_region_add_subregion(MemoryRegion *mr,
2313 hwaddr offset,
2314 MemoryRegion *subregion);
2315 /**
2316 * memory_region_add_subregion_overlap: Add a subregion to a container
2317 * with overlap.
2318 *
2319 * Adds a subregion at @offset. The subregion may overlap with other
2320 * subregions. Conflicts are resolved by having a higher @priority hide a
2321 * lower @priority. Subregions without priority are taken as @priority 0.
2322 * A region may only be added once as a subregion (unless removed with
2323 * memory_region_del_subregion()); use memory_region_init_alias() if you
2324 * want a region to be a subregion in multiple locations.
2325 *
2326 * @mr: the region to contain the new subregion; must be a container
2327 * initialized with memory_region_init().
2328 * @offset: the offset relative to @mr where @subregion is added.
2329 * @subregion: the subregion to be added.
2330 * @priority: used for resolving overlaps; highest priority wins.
2331 */
2332 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2333 hwaddr offset,
2334 MemoryRegion *subregion,
2335 int priority);
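
/*
 * Example (editor's sketch): letting a small MMIO window shadow part of a
 * larger RAM mapping; where the two overlap, the higher priority wins.
 */
#if 0 /* illustrative only */
static void map_with_overlap(MemoryRegion *container, MemoryRegion *ram,
                             MemoryRegion *mmio_window)
{
    memory_region_add_subregion_overlap(container, 0x0, ram, 0);
    /* Shadows RAM over [0x4000, 0x4000 + size of @mmio_window). */
    memory_region_add_subregion_overlap(container, 0x4000, mmio_window, 1);
}
#endif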
2336
2337 /**
2338 * memory_region_get_ram_addr: Get the ram address associated with a memory
2339 * region
2340 *
2341 * @mr: the region to be queried
2342 */
2343 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2344
2345 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2346 /**
2347 * memory_region_del_subregion: Remove a subregion.
2348 *
2349 * Removes a subregion from its container.
2350 *
2351 * @mr: the container to be updated.
2352 * @subregion: the region being removed; must be a current subregion of @mr.
2353 */
2354 void memory_region_del_subregion(MemoryRegion *mr,
2355 MemoryRegion *subregion);
2356
2357 /*
2358 * memory_region_set_enabled: dynamically enable or disable a region
2359 *
2360 * Enables or disables a memory region. A disabled memory region
2361 * ignores all accesses to itself and its subregions. It does not
2362 * obscure sibling subregions with lower priority - it simply behaves as
2363 * if it was removed from the hierarchy.
2364 *
2365 * Regions default to being enabled.
2366 *
2367 * @mr: the region to be updated
2368 * @enabled: whether to enable or disable the region
2369 */
2370 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2371
2372 /*
2373 * memory_region_set_address: dynamically update the address of a region
2374 *
2375 * Dynamically updates the address of a region, relative to its container.
2376 * May be used on regions that are currently part of a memory hierarchy.
2377 *
2378 * @mr: the region to be updated
2379 * @addr: new address, relative to container region
2380 */
2381 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2382
2383 /*
2384 * memory_region_set_size: dynamically update the size of a region.
2385 *
2386 * Dynamically updates the size of a region.
2387 *
2388 * @mr: the region to be updated
2389 * @size: the new size of the region.
2390 */
2391 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2392
2393 /*
2394 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2395 *
2396 * Dynamically updates the offset into the target region that an alias points
2397 * to, as if the @offset argument to memory_region_init_alias() had changed.
2398 *
2399 * @mr: the #MemoryRegion to be updated; should be an alias.
2400 * @offset: the new offset into the target memory region
2401 */
2402 void memory_region_set_alias_offset(MemoryRegion *mr,
2403 hwaddr offset);
2404
2405 /*
2406 * memory_region_set_unmergeable: Set a memory region unmergeable
2407 *
2408 * Mark a memory region unmergeable, resulting in the memory region (or
2409 * everything contained in a memory region container) not getting merged when
2410 * simplifying the address space and notifying memory listeners. Consequently,
2411 * memory listeners will never get notified about ranges that are larger than
2412 * the original memory regions.
2413 *
2414 * This is primarily useful when multiple aliases to a RAM memory region are
2415 * mapped into a memory region container, and updates (e.g., enable/disable or
2416 * map/unmap) of individual memory region aliases are not supposed to affect
2417 * other memory regions in the same container.
2418 *
2419 * @mr: the #MemoryRegion to be updated
2420 * @unmergeable: whether to mark the #MemoryRegion unmergeable
2421 */
2422 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
2423
2424 /**
2425 * memory_region_present: checks if an address relative to a @container
2426 * translates into a #MemoryRegion within @container
2427 *
2428 * Answer whether a #MemoryRegion within @container covers the address
2429 * @addr.
2430 *
2431 * @container: a #MemoryRegion within which @addr is a relative address
2432 * @addr: the area within @container to be searched
2433 */
2434 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2435
2436 /**
2437 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2438 * into another memory region, which does not necessarily imply that it is
2439 * mapped into an address space.
2440 *
2441 * @mr: a #MemoryRegion which should be checked if it's mapped
2442 */
2443 bool memory_region_is_mapped(MemoryRegion *mr);
2444
2445 /**
2446 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2447 * #MemoryRegion
2448 *
2449 * The #RamDiscardManager cannot change while a memory region is mapped.
2450 *
2451 * @mr: the #MemoryRegion
2452 */
2453 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2454
2455 /**
2456 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2457 * #RamDiscardManager assigned
2458 *
2459 * @mr: the #MemoryRegion
2460 */
2461 static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2462 {
2463 return !!memory_region_get_ram_discard_manager(mr);
2464 }
2465
2466 /**
2467 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2468 * #MemoryRegion
2469 *
2470 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2471 * that does not cover RAM, or a #MemoryRegion that already has a
2472 * #RamDiscardManager assigned.
2473 *
2474 * @mr: the #MemoryRegion
2475 * @rdm: #RamDiscardManager to set
2476 */
2477 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2478 RamDiscardManager *rdm);
2479
2480 /**
2481 * memory_region_find: translate an address/size relative to a
2482 * MemoryRegion into a #MemoryRegionSection.
2483 *
2484 * Locates the first #MemoryRegion within @mr that overlaps the range
2485 * given by @addr and @size.
2486 *
2487 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2488 * It will have the following characteristics:
2489 * - @size = 0 iff no overlap was found
2490 * - @mr is non-%NULL iff an overlap was found
2491 *
2492 * Remember that in the return value the @offset_within_region is
2493 * relative to the returned region (in the .@mr field), not to the
2494 * @mr argument.
2495 *
2496 * Similarly, the .@offset_within_address_space is relative to the
2497 * address space that contains both regions, the passed and the
2498 * returned one. However, in the special case where the @mr argument
2499 * has no container (and thus is the root of the address space), the
2500 * following will hold:
2501 * - @offset_within_address_space >= @addr
2502 * - @offset_within_address_space + .@size <= @addr + @size
2503 *
2504 * @mr: a MemoryRegion within which @addr is a relative address
2505 * @addr: start of the area within @mr to be searched
2506 * @size: size of the area to be searched
2507 */
2508 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2509 hwaddr addr, uint64_t size);
2510
2511 /**
2512 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2513 *
2514 * Synchronizes the dirty page log for all address spaces.
2515 *
2516 * @last_stage: whether this is the last stage of live migration
2517 */
2518 void memory_global_dirty_log_sync(bool last_stage);
2519
2520 /**
2521 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
2522 *
2523 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2524 * This function must be called after the dirty log bitmap is cleared, and
2525 * before dirty guest memory pages are read. If you are using
2526 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2527 * care of doing this.
2528 */
2529 void memory_global_after_dirty_log_sync(void);
2530
2531 /**
2532 * memory_region_transaction_begin: Start a transaction.
2533 *
2534 * During a transaction, changes will be accumulated and made visible
2535 * only when the transaction ends (is committed).
2536 */
2537 void memory_region_transaction_begin(void);
2538
2539 /**
2540 * memory_region_transaction_commit: Commit a transaction and make changes
2541 * visible to the guest.
2542 */
2543 void memory_region_transaction_commit(void);
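
/*
 * Example (editor's sketch): batching two topology updates so that
 * listeners run once and the guest observes the change atomically.
 */
#if 0 /* illustrative only */
static void my_dev_remap_bar(MemoryRegion *bar, hwaddr new_addr)
{
    memory_region_transaction_begin();
    memory_region_set_address(bar, new_addr);
    memory_region_set_enabled(bar, true);
    memory_region_transaction_commit();
}
#endif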
2544
2545 /**
2546 * memory_listener_register: register callbacks to be called when memory
2547 * sections are mapped into or unmapped from an address
2548 * space
2549 *
2550 * @listener: an object containing the callbacks to be called
2551 * @filter: if non-%NULL, only regions in this address space will be observed
2552 */
2553 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2554
2555 /**
2556 * memory_listener_unregister: undo the effect of memory_listener_register()
2557 *
2558 * @listener: an object containing the callbacks to be removed
2559 */
2560 void memory_listener_unregister(MemoryListener *listener);
2561
2562 /**
2563 * memory_global_dirty_log_start: begin dirty logging for all regions
2564 *
2565 * @flags: purpose of starting dirty log, migration or dirty rate
2566 * @errp: pointer to Error*, to store an error if it happens.
2567 *
2568 * Return: true on success, else false setting @errp with error.
2569 */
2570 bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
2571
2572 /**
2573 * memory_global_dirty_log_stop: end dirty logging for all regions
2574 *
2575 * @flags: purpose of stopping dirty log, migration or dirty rate
2576 */
2577 void memory_global_dirty_log_stop(unsigned int flags);
2578
2579 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2580
2581 bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
2582 unsigned size, bool is_write,
2583 MemTxAttrs attrs);
2584
2585 /**
2586 * memory_region_dispatch_read: perform a read directly to the specified
2587 * MemoryRegion.
2588 *
2589 * @mr: #MemoryRegion to access
2590 * @addr: address within that region
2591 * @pval: pointer to uint64_t which the data is written to
2592 * @op: size, sign, and endianness of the memory operation
2593 * @attrs: memory transaction attributes to use for the access
2594 */
2595 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2596 hwaddr addr,
2597 uint64_t *pval,
2598 MemOp op,
2599 MemTxAttrs attrs);
2600 /**
2601 * memory_region_dispatch_write: perform a write directly to the specified
2602 * MemoryRegion.
2603 *
2604 * @mr: #MemoryRegion to access
2605 * @addr: address within that region
2606 * @data: data to write
2607 * @op: size, sign, and endianness of the memory operation
2608 * @attrs: memory transaction attributes to use for the access
2609 */
2610 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2611 hwaddr addr,
2612 uint64_t data,
2613 MemOp op,
2614 MemTxAttrs attrs);
2615
2616 /**
2617 * address_space_init: initializes an address space
2618 *
2619 * @as: an uninitialized #AddressSpace
2620 * @root: a #MemoryRegion that routes addresses for the address space
2621 * @name: an address space name. The name is only used for debugging
2622 * output.
2623 */
2624 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
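
/*
 * Example (editor's sketch): giving a DMA-capable device its own address
 * space rooted at an alias of system memory. "MyDevState" and its fields
 * are hypothetical.
 */
#if 0 /* illustrative only */
static void my_dev_init_dma_as(MyDevState *s, MemoryRegion *sysmem)
{
    memory_region_init_alias(&s->bus_master_mr, OBJECT(s), "bus-master",
                             sysmem, 0, memory_region_size(sysmem));
    address_space_init(&s->dma_as, &s->bus_master_mr, "my-dev-dma");
}
#endif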
2625
2626 /**
2627 * address_space_destroy: destroy an address space
2628 *
2629 * Releases all resources associated with an address space. After an address space
2630 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2631 * as well.
2632 *
2633 * @as: address space to be destroyed
2634 */
2635 void address_space_destroy(AddressSpace *as);
2636
2637 /**
2638 * address_space_remove_listeners: unregister all listeners of an address space
2639 *
2640 * Removes all callbacks previously registered with memory_listener_register()
2641 * for @as.
2642 *
2643 * @as: an initialized #AddressSpace
2644 */
2645 void address_space_remove_listeners(AddressSpace *as);
2646
2647 /**
2648 * address_space_rw: read from or write to an address space.
2649 *
2650 * Return a MemTxResult indicating whether the operation succeeded
2651 * or failed (eg unassigned memory, device rejected the transaction,
2652 * IOMMU fault).
2653 *
2654 * @as: #AddressSpace to be accessed
2655 * @addr: address within that address space
2656 * @attrs: memory transaction attributes
2657 * @buf: buffer with the data transferred
2658 * @len: the number of bytes to read or write
2659 * @is_write: indicates the transfer direction
2660 */
2661 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2662 MemTxAttrs attrs, void *buf,
2663 hwaddr len, bool is_write);
2664
2665 /**
2666 * address_space_write: write to address space.
2667 *
2668 * Return a MemTxResult indicating whether the operation succeeded
2669 * or failed (eg unassigned memory, device rejected the transaction,
2670 * IOMMU fault).
2671 *
2672 * @as: #AddressSpace to be accessed
2673 * @addr: address within that address space
2674 * @attrs: memory transaction attributes
2675 * @buf: buffer with the data transferred
2676 * @len: the number of bytes to write
2677 */
2678 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2679 MemTxAttrs attrs,
2680 const void *buf, hwaddr len);
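
/*
 * Example (editor's sketch): a DMA store into guest memory with explicit
 * error checking, using the unspecified transaction attributes.
 */
#if 0 /* illustrative only */
static bool my_dma_push(AddressSpace *as, hwaddr dst, const void *data,
                        hwaddr len)
{
    return address_space_write(as, dst, MEMTXATTRS_UNSPECIFIED,
                               data, len) == MEMTX_OK;
}
#endif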
2681
2682 /**
2683 * address_space_write_rom: write to address space, including ROM.
2684 *
2685 * This function writes to the specified address space, but will
2686 * write data to both ROM and RAM. This is used for non-guest
2687 * writes like writes from the gdb debug stub or initial loading
2688 * of ROM contents.
2689 *
2690 * Note that portions of the write which attempt to write data to
2691 * a device will be silently ignored -- only real RAM and ROM will
2692 * be written to.
2693 *
2694 * Return a MemTxResult indicating whether the operation succeeded
2695 * or failed (eg unassigned memory, device rejected the transaction,
2696 * IOMMU fault).
2697 *
2698 * @as: #AddressSpace to be accessed
2699 * @addr: address within that address space
2700 * @attrs: memory transaction attributes
2701 * @buf: buffer with the data transferred
2702 * @len: the number of bytes to write
2703 */
2704 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2705 MemTxAttrs attrs,
2706 const void *buf, hwaddr len);
2707
2708 /* address_space_ld*: load from an address space
2709 * address_space_st*: store to an address space
2710 *
2711 * These functions perform a load or store of the byte, word,
2712 * longword or quad to the specified address within the AddressSpace.
2713 * The _le suffixed functions treat the data as little endian;
2714 * _be indicates big endian; no suffix indicates "same endianness
2715 * as guest CPU".
2716 *
2717 * The "guest CPU endianness" accessors are deprecated for use outside
2718 * target-* code; devices should be CPU-agnostic and use either the LE
2719 * or the BE accessors.
2720 *
2721 * @as: #AddressSpace to be accessed
2722 * @addr: address within that address space
2723 * @val: data value, for stores
2724 * @attrs: memory transaction attributes
2725 * @result: location to write the success/failure of the transaction;
2726 * if NULL, this information is discarded
2727 */
2728
2729 #define SUFFIX
2730 #define ARG1 as
2731 #define ARG1_DECL AddressSpace *as
2732 #include "exec/memory_ldst.h.inc"
2733
2734 static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2735 {
2736 address_space_stl_notdirty(as, addr, val,
2737 MEMTXATTRS_UNSPECIFIED, NULL);
2738 }
2739
2740 #define SUFFIX
2741 #define ARG1 as
2742 #define ARG1_DECL AddressSpace *as
2743 #include "exec/memory_ldst_phys.h.inc"
2744
2745 struct MemoryRegionCache {
2746 uint8_t *ptr;
2747 hwaddr xlat;
2748 hwaddr len;
2749 FlatView *fv;
2750 MemoryRegionSection mrs;
2751 bool is_write;
2752 };
2753
2754 /* address_space_ld*_cached: load from a cached #MemoryRegion
2755 * address_space_st*_cached: store into a cached #MemoryRegion
2756 *
2757 * These functions perform a load or store of the byte, word,
2758 * longword or quad to the specified address. The address is
2759 * a physical address in the AddressSpace, but it must lie within
2760 * a #MemoryRegion that was mapped with address_space_cache_init.
2761 *
2762 * The _le suffixed functions treat the data as little endian;
2763 * _be indicates big endian; no suffix indicates "same endianness
2764 * as guest CPU".
2765 *
2766 * The "guest CPU endianness" accessors are deprecated for use outside
2767 * target-* code; devices should be CPU-agnostic and use either the LE
2768 * or the BE accessors.
2769 *
2770 * @cache: previously initialized #MemoryRegionCache to be accessed
2771 * @addr: address within the address space
2772 * @val: data value, for stores
2773 * @attrs: memory transaction attributes
2774 * @result: location to write the success/failure of the transaction;
2775 * if NULL, this information is discarded
2776 */
2777
2778 #define SUFFIX _cached_slow
2779 #define ARG1 cache
2780 #define ARG1_DECL MemoryRegionCache *cache
2781 #include "exec/memory_ldst.h.inc"
2782
2783 /* Inline fast path for direct RAM access. */
2784 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2785 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2786 {
2787 assert(addr < cache->len);
2788 if (likely(cache->ptr)) {
2789 return ldub_p(cache->ptr + addr);
2790 } else {
2791 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2792 }
2793 }
2794
2795 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2796 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
2797 {
2798 assert(addr < cache->len);
2799 if (likely(cache->ptr)) {
2800 stb_p(cache->ptr + addr, val);
2801 } else {
2802 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2803 }
2804 }
2805
2806 #define ENDIANNESS
2807 #include "exec/memory_ldst_cached.h.inc"
2808
2809 #define ENDIANNESS _le
2810 #include "exec/memory_ldst_cached.h.inc"
2811
2812 #define ENDIANNESS _be
2813 #include "exec/memory_ldst_cached.h.inc"
2814
2815 #define SUFFIX _cached
2816 #define ARG1 cache
2817 #define ARG1_DECL MemoryRegionCache *cache
2818 #include "exec/memory_ldst_phys.h.inc"
2819
2820 /* address_space_cache_init: prepare for repeated access to a physical
2821 * memory region
2822 *
2823 * @cache: #MemoryRegionCache to be filled
2824 * @as: #AddressSpace to be accessed
2825 * @addr: address within that address space
2826 * @len: length of buffer
2827 * @is_write: indicates the transfer direction
2828 *
2829 * Will only work with RAM, and may map a subset of the requested range by
2830 * returning a value that is less than @len. On failure, return a negative
2831 * errno value.
2832 *
2833 * Because it only works with RAM, this function can be used for
2834 * read-modify-write operations. In this case, is_write should be %true.
2835 *
2836 * Note that addresses passed to the address_space_*_cached functions
2837 * are relative to @addr.
2838 */
2839 int64_t address_space_cache_init(MemoryRegionCache *cache,
2840 AddressSpace *as,
2841 hwaddr addr,
2842 hwaddr len,
2843 bool is_write);
2844
2845 /**
2846 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
2847 *
2848 * @cache: The #MemoryRegionCache to operate on.
2849 *
2850 * Initializes #MemoryRegionCache structure without memory region attached.
2851 * A cache initialized this way can only be safely destroyed; it must not be used.
2852 */
2853 static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
2854 {
2855 cache->mrs.mr = NULL;
2856 /* There is no real need to initialize fv, but it makes Coverity happy. */
2857 cache->fv = NULL;
2858 }
2859
2860 /**
2861 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2862 *
2863 * @cache: The #MemoryRegionCache to operate on.
2864 * @addr: The first physical address that was written, relative to the
2865 * address that was passed to @address_space_cache_init.
2866 * @access_len: The number of bytes that were written starting at @addr.
2867 */
2868 void address_space_cache_invalidate(MemoryRegionCache *cache,
2869 hwaddr addr,
2870 hwaddr access_len);
2871
2872 /**
2873 * address_space_cache_destroy: free a #MemoryRegionCache
2874 *
2875 * @cache: The #MemoryRegionCache whose memory should be released.
2876 */
2877 void address_space_cache_destroy(MemoryRegionCache *cache);
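
/*
 * Example (editor's sketch): the full lifecycle of a #MemoryRegionCache
 * for repeated accesses to a virtio-style ring. The ring layout (a
 * little-endian uint16_t at offset 0, another at offset 2) is
 * hypothetical.
 */
#if 0 /* illustrative only */
static void my_ring_bump(AddressSpace *as, hwaddr ring, hwaddr ring_len)
{
    MemoryRegionCache cache;
    int64_t mapped;
    uint16_t idx;

    mapped = address_space_cache_init(&cache, as, ring, ring_len, true);
    if (mapped < 0 || (hwaddr)mapped < ring_len) {
        return;  /* not RAM, or only partially mapped */
    }
    idx = address_space_lduw_le_cached(&cache, 0,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
    address_space_stw_le_cached(&cache, 2, idx + 1,
                                MEMTXATTRS_UNSPECIFIED, NULL);
    /* Complete the write before anyone else reads the ring. */
    address_space_cache_invalidate(&cache, 2, sizeof(uint16_t));
    address_space_cache_destroy(&cache);
}
#endif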
2878
2879 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2880 * entry. Should be called from an RCU critical section.
2881 */
2882 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2883 bool is_write, MemTxAttrs attrs);
2884
2885 /* address_space_translate: translate an address range within an address
2886 * space into a MemoryRegion and an address range into that region. Should be
2887 * called from an RCU critical section, to avoid that the last reference
2888 * to the returned region disappears after address_space_translate returns.
2889 *
2890 * @fv: #FlatView to be accessed
2891 * @addr: address within that address space
2892 * @xlat: pointer to address within the returned memory region section's
2893 * #MemoryRegion.
2894 * @len: pointer to length
2895 * @is_write: indicates the transfer direction
2896 * @attrs: memory attributes
2897 */
2898 MemoryRegion *flatview_translate(FlatView *fv,
2899 hwaddr addr, hwaddr *xlat,
2900 hwaddr *len, bool is_write,
2901 MemTxAttrs attrs);
2902
2903 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2904 hwaddr addr, hwaddr *xlat,
2905 hwaddr *len, bool is_write,
2906 MemTxAttrs attrs)
2907 {
2908 return flatview_translate(address_space_to_flatview(as),
2909 addr, xlat, len, is_write, attrs);
2910 }
2911
2912 /* address_space_access_valid: check for validity of accessing an address
2913 * space range
2914 *
2915 * Check whether memory is assigned to the given address space range, and
2916 * access is permitted by any IOMMU regions that are active for the address
2917 * space.
2918 *
2919 * For now, addr and len should be aligned to a page size. This limitation
2920 * will be lifted in the future.
2921 *
2922 * @as: #AddressSpace to be accessed
2923 * @addr: address within that address space
2924 * @len: length of the area to be checked
2925 * @is_write: indicates the transfer direction
2926 * @attrs: memory attributes
2927 */
2928 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2929 bool is_write, MemTxAttrs attrs);
2930
2931 /* address_space_map: map a physical memory region into a host virtual address
2932 *
2933 * May map a subset of the requested range, given by and returned in @plen.
2934 * May return %NULL and set *@plen to zero, if resources needed to perform
2935 * the mapping are exhausted.
2936 * Use only for reads OR writes - not for read-modify-write operations.
2937 * Use address_space_register_map_client() to know when retrying the map
2938 * operation is likely to succeed.
2939 *
2940 * @as: #AddressSpace to be accessed
2941 * @addr: address within that address space
2942 * @plen: pointer to length of buffer; updated on return
2943 * @is_write: indicates the transfer direction
2944 * @attrs: memory attributes
2945 */
2946 void *address_space_map(AddressSpace *as, hwaddr addr,
2947 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2948
2949 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2950 *
2951 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2952 * the amount of memory that was actually read or written by the caller.
2953 *
2954 * @as: #AddressSpace used
2955 * @buffer: host pointer as returned by address_space_map()
2956 * @len: buffer length as returned by address_space_map()
2957 * @access_len: amount of data actually transferred
2958 * @is_write: indicates the transfer direction
2959 */
2960 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2961 bool is_write, hwaddr access_len);
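
/*
 * Example (editor's sketch): the canonical map/process/unmap pattern for
 * a read-only view of a guest buffer.
 */
#if 0 /* illustrative only */
static void my_process_guest_buf(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, false,
                                MEMTXATTRS_UNSPECIFIED);

    if (!p) {
        return;  /* resources exhausted; consider
                  * address_space_register_map_client() */
    }
    if (plen == len) {
        /* consume(p, len); */
    }
    /* Read-only mapping: no bytes were written, nothing to mark dirty. */
    address_space_unmap(as, p, plen, false, 0);
}
#endif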
2962
2963 /*
2964 * address_space_register_map_client: Register a callback to invoke when
2965 * resources for address_space_map() are available again.
2966 *
2967 * address_space_map may fail when there are not enough resources available,
2968 * such as when bounce buffer memory would exceed the limit. The callback can
2969 * be used to retry the address_space_map operation. Note that the callback
2970 * gets automatically removed after firing.
2971 *
2972 * @as: #AddressSpace to be accessed
2973 * @bh: callback to invoke when address_space_map() retry is appropriate
2974 */
2975 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
2976
2977 /*
2978 * address_space_unregister_map_client: Unregister a callback that has
2979 * previously been registered and not fired yet.
2980 *
2981 * @as: #AddressSpace to be accessed
2982 * @bh: callback to unregister
2983 */
2984 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
2985
2986 /* Internal functions, part of the implementation of address_space_read. */
2987 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2988 MemTxAttrs attrs, void *buf, hwaddr len);
2989 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2990 MemTxAttrs attrs, void *buf,
2991 hwaddr len, hwaddr addr1, hwaddr l,
2992 MemoryRegion *mr);
2993 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2994
2995 /* Internal functions, part of the implementation of address_space_read_cached
2996 * and address_space_write_cached. */
2997 MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2998 hwaddr addr, void *buf, hwaddr len);
2999 MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
3000 hwaddr addr, const void *buf,
3001 hwaddr len);
3002
3003 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
3004 bool prepare_mmio_access(MemoryRegion *mr);
3005
3006 static inline bool memory_region_supports_direct_access(MemoryRegion *mr)
3007 {
3008 /* ROM DEVICE regions only allow direct access if in ROMD mode. */
3009 if (memory_region_is_romd(mr)) {
3010 return true;
3011 }
3012 if (!memory_region_is_ram(mr)) {
3013 return false;
3014 }
3015 /*
3016 * RAM DEVICE regions can be accessed directly using memcpy, but they
3017 * might actually be MMIO, where access using memcpy can be wrong (e.g.,
3018 * using instructions not intended for MMIO access). So we treat this as IO.
3019 */
3020 return !memory_region_is_ram_device(mr);
3021 }
3022
3023 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write,
3024 MemTxAttrs attrs)
3025 {
3026 if (!memory_region_supports_direct_access(mr)) {
3027 return false;
3028 }
3029 /* Debug access can write to ROM. */
3030 if (is_write && !attrs.debug) {
3031 return !mr->readonly && !mr->rom_device;
3032 }
3033 return true;
3034 }
3035
3036 /**
3037 * address_space_read: read from an address space.
3038 *
3039 * Return a MemTxResult indicating whether the operation succeeded
3040 * or failed (eg unassigned memory, device rejected the transaction,
3041 * IOMMU fault). Called within RCU critical section.
3042 *
3043 * @as: #AddressSpace to be accessed
3044 * @addr: address within that address space
3045 * @attrs: memory transaction attributes
3046 * @buf: buffer with the data transferred
3047 * @len: length of the data transferred
3048 */
3049 static inline __attribute__((__always_inline__))
address_space_read(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,void * buf,hwaddr len)3050 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
3051 MemTxAttrs attrs, void *buf,
3052 hwaddr len)
3053 {
3054 MemTxResult result = MEMTX_OK;
3055 hwaddr l, addr1;
3056 void *ptr;
3057 MemoryRegion *mr;
3058 FlatView *fv;
3059
3060 if (__builtin_constant_p(len)) {
3061 if (len) {
3062 RCU_READ_LOCK_GUARD();
3063 fv = address_space_to_flatview(as);
3064 l = len;
3065 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
3066 if (len == l && memory_access_is_direct(mr, false, attrs)) {
3067 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3068 memcpy(buf, ptr, len);
3069 } else {
3070 result = flatview_read_continue(fv, addr, attrs, buf, len,
3071 addr1, l, mr);
3072 }
3073 }
3074 } else {
3075 result = address_space_read_full(as, addr, attrs, buf, len);
3076 }
3077 return result;
3078 }
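
/*
 * A short illustrative use, reading a 32-bit value; the address space
 * pointer "as" and the guest address are placeholders for this sketch:
 *
 *     uint32_t val;
 *     MemTxResult r = address_space_read(as, 0x1000,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        &val, sizeof(val));
 *     if (r != MEMTX_OK) {
 *         ... handle unassigned memory or a device error ...
 *     }
 */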

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}
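
/*
 * Typical lifecycle of a MemoryRegionCache, as a sketch; it assumes the
 * address_space_cache_init()/invalidate()/destroy() helpers declared
 * earlier in this header, with "as", "base" and "val" as placeholders:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     if (address_space_cache_init(&cache, as, base, 0x100, true) >= 0) {
 *         address_space_write_cached(&cache, 0x10, &val, sizeof(val));
 *         address_space_cache_invalidate(&cache, 0x10, sizeof(val));
 *         address_space_cache_destroy(&cache);
 *     }
 */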

/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);
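
/*
 * For example, zeroing a 4 KiB range (the address space and base address
 * are placeholders):
 *
 *     address_space_set(as, base, 0, 4096, MEMTXATTRS_UNSPECIFIED);
 */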

/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating any
 *   previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);
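
/*
 * Sketch of a user that pins all guest memory, e.g., in a hypothetical
 * device realize function (errp is the usual Error ** out-parameter):
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_setg(errp, "RAM discarding is in use; cannot pin memory");
 *         return;
 *     }
 *     ...
 *     ram_block_discard_disable(false);  (on unrealize)
 */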

/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discarding of pages has already
 * been disabled.
 */
int ram_block_discard_require(bool state);

/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
void ram_block_del_cpr_blocker(RAMBlock *rb);

#endif