1 /* 2 * RAM allocation and memory access 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "exec/page-vary.h" 22 #include "qapi/error.h" 23 24 #include "qemu/cutils.h" 25 #include "qemu/cacheflush.h" 26 #include "qemu/hbitmap.h" 27 #include "qemu/madvise.h" 28 #include "qemu/lockable.h" 29 30 #ifdef CONFIG_TCG 31 #include "hw/core/tcg-cpu-ops.h" 32 #endif /* CONFIG_TCG */ 33 34 #include "exec/exec-all.h" 35 #include "exec/page-protection.h" 36 #include "exec/target_page.h" 37 #include "exec/translation-block.h" 38 #include "hw/qdev-core.h" 39 #include "hw/qdev-properties.h" 40 #include "hw/boards.h" 41 #include "system/xen.h" 42 #include "system/kvm.h" 43 #include "system/tcg.h" 44 #include "system/qtest.h" 45 #include "qemu/timer.h" 46 #include "qemu/config-file.h" 47 #include "qemu/error-report.h" 48 #include "qemu/qemu-print.h" 49 #include "qemu/log.h" 50 #include "qemu/memalign.h" 51 #include "qemu/memfd.h" 52 #include "exec/memory.h" 53 #include "exec/ioport.h" 54 #include "system/dma.h" 55 #include "system/hostmem.h" 56 #include "system/hw_accel.h" 57 #include "system/xen-mapcache.h" 58 #include "trace.h" 59 60 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 61 #include <linux/falloc.h> 62 #endif 63 64 #include "qemu/rcu_queue.h" 65 #include "qemu/main-loop.h" 66 #include "system/replay.h" 67 68 #include "exec/memory-internal.h" 69 #include "exec/ram_addr.h" 70 71 #include "qemu/pmem.h" 72 73 #include "migration/cpr.h" 74 #include "migration/vmstate.h" 75 76 #include "qemu/range.h" 77 #ifndef _WIN32 78 #include "qemu/mmap-alloc.h" 79 #endif 80 81 #include "monitor/monitor.h" 82 83 #ifdef CONFIG_LIBDAXCTL 84 #include <daxctl/libdaxctl.h> 85 #endif 86 87 //#define DEBUG_SUBPAGE 88 89 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes 90 * are protected by the ramlist lock. 91 */ 92 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; 93 94 static MemoryRegion *system_memory; 95 static MemoryRegion *system_io; 96 97 AddressSpace address_space_io; 98 AddressSpace address_space_memory; 99 100 static MemoryRegion io_mem_unassigned; 101 102 typedef struct PhysPageEntry PhysPageEntry; 103 104 struct PhysPageEntry { 105 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ 106 uint32_t skip : 6; 107 /* index into phys_sections (!skip) or phys_map_nodes (skip) */ 108 uint32_t ptr : 26; 109 }; 110 111 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6) 112 113 /* Size of the L2 (and L3, etc) page tables. 
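 *
 * For example, with ADDR_SPACE_BITS = 64, P_L2_BITS = 9 and a typical
 * 4 KiB target page (TARGET_PAGE_BITS = 12), P_L2_LEVELS below evaluates to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels.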
*/ 114 #define ADDR_SPACE_BITS 64 115 116 #define P_L2_BITS 9 117 #define P_L2_SIZE (1 << P_L2_BITS) 118 119 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) 120 121 typedef PhysPageEntry Node[P_L2_SIZE]; 122 123 typedef struct PhysPageMap { 124 struct rcu_head rcu; 125 126 unsigned sections_nb; 127 unsigned sections_nb_alloc; 128 unsigned nodes_nb; 129 unsigned nodes_nb_alloc; 130 Node *nodes; 131 MemoryRegionSection *sections; 132 } PhysPageMap; 133 134 struct AddressSpaceDispatch { 135 MemoryRegionSection *mru_section; 136 /* This is a multi-level map on the physical address space. 137 * The bottom level has pointers to MemoryRegionSections. 138 */ 139 PhysPageEntry phys_map; 140 PhysPageMap map; 141 }; 142 143 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 144 typedef struct subpage_t { 145 MemoryRegion iomem; 146 FlatView *fv; 147 hwaddr base; 148 uint16_t sub_section[]; 149 } subpage_t; 150 151 #define PHYS_SECTION_UNASSIGNED 0 152 153 static void io_mem_init(void); 154 static void memory_map_init(void); 155 static void tcg_log_global_after_sync(MemoryListener *listener); 156 static void tcg_commit(MemoryListener *listener); 157 158 /** 159 * CPUAddressSpace: all the information a CPU needs about an AddressSpace 160 * @cpu: the CPU whose AddressSpace this is 161 * @as: the AddressSpace itself 162 * @memory_dispatch: its dispatch pointer (cached, RCU protected) 163 * @tcg_as_listener: listener for tracking changes to the AddressSpace 164 */ 165 typedef struct CPUAddressSpace { 166 CPUState *cpu; 167 AddressSpace *as; 168 struct AddressSpaceDispatch *memory_dispatch; 169 MemoryListener tcg_as_listener; 170 } CPUAddressSpace; 171 172 struct DirtyBitmapSnapshot { 173 ram_addr_t start; 174 ram_addr_t end; 175 unsigned long dirty[]; 176 }; 177 178 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) 179 { 180 static unsigned alloc_hint = 16; 181 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { 182 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); 183 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); 184 alloc_hint = map->nodes_nb_alloc; 185 } 186 } 187 188 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) 189 { 190 unsigned i; 191 uint32_t ret; 192 PhysPageEntry e; 193 PhysPageEntry *p; 194 195 ret = map->nodes_nb++; 196 p = map->nodes[ret]; 197 assert(ret != PHYS_MAP_NODE_NIL); 198 assert(ret != map->nodes_nb_alloc); 199 200 e.skip = leaf ? 0 : 1; 201 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; 202 for (i = 0; i < P_L2_SIZE; ++i) { 203 memcpy(&p[i], &e, sizeof(e)); 204 } 205 return ret; 206 } 207 208 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, 209 hwaddr *index, uint64_t *nb, uint16_t leaf, 210 int level) 211 { 212 PhysPageEntry *p; 213 hwaddr step = (hwaddr)1 << (level * P_L2_BITS); 214 215 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { 216 lp->ptr = phys_map_node_alloc(map, level == 0); 217 } 218 p = map->nodes[lp->ptr]; 219 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; 220 221 while (*nb && lp < &p[P_L2_SIZE]) { 222 if ((*index & (step - 1)) == 0 && *nb >= step) { 223 lp->skip = 0; 224 lp->ptr = leaf; 225 *index += step; 226 *nb -= step; 227 } else { 228 phys_page_set_level(map, lp, index, nb, leaf, level - 1); 229 } 230 ++lp; 231 } 232 } 233 234 static void phys_page_set(AddressSpaceDispatch *d, 235 hwaddr index, uint64_t nb, 236 uint16_t leaf) 237 { 238 /* Wildly overreserve - it doesn't matter much. 
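     * A single phys_page_set() call only needs new nodes for partially
     * covered entries, at most a couple per level, so 3 * P_L2_LEVELS is a
     * comfortable upper bound for one call.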
     */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address.
This 406 * should only be meaningful for IOMMU translated 407 * addresses, since there may be huge pages that this bit 408 * would tell. It can be %NULL if we don't care about it. 409 * @is_write: whether the translation operation is for write 410 * @is_mmio: whether this can be MMIO, set true if it can 411 * @target_as: the address space targeted by the IOMMU 412 * @attrs: transaction attributes 413 * 414 * This function is called from RCU critical section. It is the common 415 * part of flatview_do_translate and address_space_translate_cached. 416 */ 417 static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, 418 hwaddr *xlat, 419 hwaddr *plen_out, 420 hwaddr *page_mask_out, 421 bool is_write, 422 bool is_mmio, 423 AddressSpace **target_as, 424 MemTxAttrs attrs) 425 { 426 MemoryRegionSection *section; 427 hwaddr page_mask = (hwaddr)-1; 428 429 do { 430 hwaddr addr = *xlat; 431 IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 432 int iommu_idx = 0; 433 IOMMUTLBEntry iotlb; 434 435 if (imrc->attrs_to_index) { 436 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 437 } 438 439 iotlb = imrc->translate(iommu_mr, addr, is_write ? 440 IOMMU_WO : IOMMU_RO, iommu_idx); 441 442 if (!(iotlb.perm & (1 << is_write))) { 443 goto unassigned; 444 } 445 446 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 447 | (addr & iotlb.addr_mask)); 448 page_mask &= iotlb.addr_mask; 449 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); 450 *target_as = iotlb.target_as; 451 452 section = address_space_translate_internal( 453 address_space_to_dispatch(iotlb.target_as), addr, xlat, 454 plen_out, is_mmio); 455 456 iommu_mr = memory_region_get_iommu(section->mr); 457 } while (unlikely(iommu_mr)); 458 459 if (page_mask_out) { 460 *page_mask_out = page_mask; 461 } 462 return *section; 463 464 unassigned: 465 return (MemoryRegionSection) { .mr = &io_mem_unassigned }; 466 } 467 468 /** 469 * flatview_do_translate - translate an address in FlatView 470 * 471 * @fv: the flat view that we want to translate on 472 * @addr: the address to be translated in above address space 473 * @xlat: the translated address offset within memory region. It 474 * cannot be @NULL. 475 * @plen_out: valid read/write length of the translated address. It 476 * can be @NULL when we don't care about it. 477 * @page_mask_out: page mask for the translated address. This 478 * should only be meaningful for IOMMU translated 479 * addresses, since there may be huge pages that this bit 480 * would tell. It can be @NULL if we don't care about it. 
481 * @is_write: whether the translation operation is for write 482 * @is_mmio: whether this can be MMIO, set true if it can 483 * @target_as: the address space targeted by the IOMMU 484 * @attrs: memory transaction attributes 485 * 486 * This function is called from RCU critical section 487 */ 488 static MemoryRegionSection flatview_do_translate(FlatView *fv, 489 hwaddr addr, 490 hwaddr *xlat, 491 hwaddr *plen_out, 492 hwaddr *page_mask_out, 493 bool is_write, 494 bool is_mmio, 495 AddressSpace **target_as, 496 MemTxAttrs attrs) 497 { 498 MemoryRegionSection *section; 499 IOMMUMemoryRegion *iommu_mr; 500 hwaddr plen = (hwaddr)(-1); 501 502 if (!plen_out) { 503 plen_out = &plen; 504 } 505 506 section = address_space_translate_internal( 507 flatview_to_dispatch(fv), addr, xlat, 508 plen_out, is_mmio); 509 510 iommu_mr = memory_region_get_iommu(section->mr); 511 if (unlikely(iommu_mr)) { 512 return address_space_translate_iommu(iommu_mr, xlat, 513 plen_out, page_mask_out, 514 is_write, is_mmio, 515 target_as, attrs); 516 } 517 if (page_mask_out) { 518 /* Not behind an IOMMU, use default page size. */ 519 *page_mask_out = ~TARGET_PAGE_MASK; 520 } 521 522 return *section; 523 } 524 525 /* Called from RCU critical section */ 526 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, 527 bool is_write, MemTxAttrs attrs) 528 { 529 MemoryRegionSection section; 530 hwaddr xlat, page_mask; 531 532 /* 533 * This can never be MMIO, and we don't really care about plen, 534 * but page mask. 535 */ 536 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat, 537 NULL, &page_mask, is_write, false, &as, 538 attrs); 539 540 /* Illegal translation */ 541 if (section.mr == &io_mem_unassigned) { 542 goto iotlb_fail; 543 } 544 545 /* Convert memory region offset into address space offset */ 546 xlat += section.offset_within_address_space - 547 section.offset_within_region; 548 549 return (IOMMUTLBEntry) { 550 .target_as = as, 551 .iova = addr & ~page_mask, 552 .translated_addr = xlat & ~page_mask, 553 .addr_mask = page_mask, 554 /* IOTLBs are for DMAs, and DMA only allows on RAMs. */ 555 .perm = IOMMU_RW, 556 }; 557 558 iotlb_fail: 559 return (IOMMUTLBEntry) {0}; 560 } 561 562 /* Called from RCU critical section */ 563 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, 564 hwaddr *plen, bool is_write, 565 MemTxAttrs attrs) 566 { 567 MemoryRegion *mr; 568 MemoryRegionSection section; 569 AddressSpace *as = NULL; 570 571 /* This can be MMIO, so setup MMIO bit. */ 572 section = flatview_do_translate(fv, addr, xlat, plen, NULL, 573 is_write, true, &as, attrs); 574 mr = section.mr; 575 576 if (xen_enabled() && memory_access_is_direct(mr, is_write)) { 577 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; 578 *plen = MIN(page, *plen); 579 } 580 581 return mr; 582 } 583 584 typedef struct TCGIOMMUNotifier { 585 IOMMUNotifier n; 586 MemoryRegion *mr; 587 CPUState *cpu; 588 int iommu_idx; 589 bool active; 590 } TCGIOMMUNotifier; 591 592 static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 593 { 594 TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n); 595 596 if (!notifier->active) { 597 return; 598 } 599 tlb_flush(notifier->cpu); 600 notifier->active = false; 601 /* We leave the notifier struct on the list to avoid reallocating it later. 602 * Generally the number of IOMMUs a CPU deals with will be small. 603 * In any case we can't unregister the iommu notifier from a notify 604 * callback. 
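     * The structs are finally freed by tcg_iommu_free_notifier_list() when
     * the CPU itself is torn down.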
605 */ 606 } 607 608 static void tcg_register_iommu_notifier(CPUState *cpu, 609 IOMMUMemoryRegion *iommu_mr, 610 int iommu_idx) 611 { 612 /* Make sure this CPU has an IOMMU notifier registered for this 613 * IOMMU/IOMMU index combination, so that we can flush its TLB 614 * when the IOMMU tells us the mappings we've cached have changed. 615 */ 616 MemoryRegion *mr = MEMORY_REGION(iommu_mr); 617 TCGIOMMUNotifier *notifier = NULL; 618 int i; 619 620 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 621 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 622 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { 623 break; 624 } 625 } 626 if (i == cpu->iommu_notifiers->len) { 627 /* Not found, add a new entry at the end of the array */ 628 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); 629 notifier = g_new0(TCGIOMMUNotifier, 1); 630 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; 631 632 notifier->mr = mr; 633 notifier->iommu_idx = iommu_idx; 634 notifier->cpu = cpu; 635 /* Rather than trying to register interest in the specific part 636 * of the iommu's address space that we've accessed and then 637 * expand it later as subsequent accesses touch more of it, we 638 * just register interest in the whole thing, on the assumption 639 * that iommu reconfiguration will be rare. 640 */ 641 iommu_notifier_init(¬ifier->n, 642 tcg_iommu_unmap_notify, 643 IOMMU_NOTIFIER_UNMAP, 644 0, 645 HWADDR_MAX, 646 iommu_idx); 647 memory_region_register_iommu_notifier(notifier->mr, ¬ifier->n, 648 &error_fatal); 649 } 650 651 if (!notifier->active) { 652 notifier->active = true; 653 } 654 } 655 656 void tcg_iommu_free_notifier_list(CPUState *cpu) 657 { 658 /* Destroy the CPU's notifier list */ 659 int i; 660 TCGIOMMUNotifier *notifier; 661 662 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 663 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 664 memory_region_unregister_iommu_notifier(notifier->mr, ¬ifier->n); 665 g_free(notifier); 666 } 667 g_array_free(cpu->iommu_notifiers, true); 668 } 669 670 void tcg_iommu_init_notifier_list(CPUState *cpu) 671 { 672 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); 673 } 674 675 /* Called from RCU critical section */ 676 MemoryRegionSection * 677 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, 678 hwaddr *xlat, hwaddr *plen, 679 MemTxAttrs attrs, int *prot) 680 { 681 MemoryRegionSection *section; 682 IOMMUMemoryRegion *iommu_mr; 683 IOMMUMemoryRegionClass *imrc; 684 IOMMUTLBEntry iotlb; 685 int iommu_idx; 686 hwaddr addr = orig_addr; 687 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; 688 689 for (;;) { 690 section = address_space_translate_internal(d, addr, &addr, plen, false); 691 692 iommu_mr = memory_region_get_iommu(section->mr); 693 if (!iommu_mr) { 694 break; 695 } 696 697 imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 698 699 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 700 tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); 701 /* We need all the permissions, so pass IOMMU_NONE so the IOMMU 702 * doesn't short-cut its translation table walk. 703 */ 704 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); 705 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 706 | (addr & iotlb.addr_mask)); 707 /* Update the caller's prot bits to remove permissions the IOMMU 708 * is giving us a failure response for. If we get down to no 709 * permissions left at all we can give up now. 
710 */ 711 if (!(iotlb.perm & IOMMU_RO)) { 712 *prot &= ~(PAGE_READ | PAGE_EXEC); 713 } 714 if (!(iotlb.perm & IOMMU_WO)) { 715 *prot &= ~PAGE_WRITE; 716 } 717 718 if (!*prot) { 719 goto translate_fail; 720 } 721 722 d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as)); 723 } 724 725 assert(!memory_region_is_iommu(section->mr)); 726 *xlat = addr; 727 return section; 728 729 translate_fail: 730 /* 731 * We should be given a page-aligned address -- certainly 732 * tlb_set_page_with_attrs() does so. The page offset of xlat 733 * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0. 734 * The page portion of xlat will be logged by memory_region_access_valid() 735 * when this memory access is rejected, so use the original untranslated 736 * physical address. 737 */ 738 assert((orig_addr & ~TARGET_PAGE_MASK) == 0); 739 *xlat = orig_addr; 740 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; 741 } 742 743 void cpu_address_space_init(CPUState *cpu, int asidx, 744 const char *prefix, MemoryRegion *mr) 745 { 746 CPUAddressSpace *newas; 747 AddressSpace *as = g_new0(AddressSpace, 1); 748 char *as_name; 749 750 assert(mr); 751 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); 752 address_space_init(as, mr, as_name); 753 g_free(as_name); 754 755 /* Target code should have set num_ases before calling us */ 756 assert(asidx < cpu->num_ases); 757 758 if (asidx == 0) { 759 /* address space 0 gets the convenience alias */ 760 cpu->as = as; 761 } 762 763 /* KVM cannot currently support multiple address spaces. */ 764 assert(asidx == 0 || !kvm_enabled()); 765 766 if (!cpu->cpu_ases) { 767 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); 768 cpu->cpu_ases_count = cpu->num_ases; 769 } 770 771 newas = &cpu->cpu_ases[asidx]; 772 newas->cpu = cpu; 773 newas->as = as; 774 if (tcg_enabled()) { 775 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; 776 newas->tcg_as_listener.commit = tcg_commit; 777 newas->tcg_as_listener.name = "tcg"; 778 memory_listener_register(&newas->tcg_as_listener, as); 779 } 780 } 781 782 void cpu_address_space_destroy(CPUState *cpu, int asidx) 783 { 784 CPUAddressSpace *cpuas; 785 786 assert(cpu->cpu_ases); 787 assert(asidx >= 0 && asidx < cpu->num_ases); 788 /* KVM cannot currently support multiple address spaces. */ 789 assert(asidx == 0 || !kvm_enabled()); 790 791 cpuas = &cpu->cpu_ases[asidx]; 792 if (tcg_enabled()) { 793 memory_listener_unregister(&cpuas->tcg_as_listener); 794 } 795 796 address_space_destroy(cpuas->as); 797 g_free_rcu(cpuas->as, rcu); 798 799 if (asidx == 0) { 800 /* reset the convenience alias for address space 0 */ 801 cpu->as = NULL; 802 } 803 804 if (--cpu->cpu_ases_count == 0) { 805 g_free(cpu->cpu_ases); 806 cpu->cpu_ases = NULL; 807 } 808 } 809 810 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) 811 { 812 /* Return the AddressSpace corresponding to the specified index */ 813 return cpu->cpu_ases[asidx].as; 814 } 815 816 /* Called from RCU critical section */ 817 static RAMBlock *qemu_get_ram_block(ram_addr_t addr) 818 { 819 RAMBlock *block; 820 821 block = qatomic_rcu_read(&ram_list.mru_block); 822 if (block && addr - block->offset < block->max_length) { 823 return block; 824 } 825 RAMBLOCK_FOREACH(block) { 826 if (addr - block->offset < block->max_length) { 827 goto found; 828 } 829 } 830 831 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 832 abort(); 833 834 found: 835 /* It is safe to write mru_block outside the BQL. 
This 836 * is what happens: 837 * 838 * mru_block = xxx 839 * rcu_read_unlock() 840 * xxx removed from list 841 * rcu_read_lock() 842 * read mru_block 843 * mru_block = NULL; 844 * call_rcu(reclaim_ramblock, xxx); 845 * rcu_read_unlock() 846 * 847 * qatomic_rcu_set is not needed here. The block was already published 848 * when it was placed into the list. Here we're just making an extra 849 * copy of the pointer. 850 */ 851 ram_list.mru_block = block; 852 return block; 853 } 854 855 void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) 856 { 857 CPUState *cpu; 858 ram_addr_t start1; 859 RAMBlock *block; 860 ram_addr_t end; 861 862 assert(tcg_enabled()); 863 end = TARGET_PAGE_ALIGN(start + length); 864 start &= TARGET_PAGE_MASK; 865 866 RCU_READ_LOCK_GUARD(); 867 block = qemu_get_ram_block(start); 868 assert(block == qemu_get_ram_block(end - 1)); 869 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); 870 CPU_FOREACH(cpu) { 871 tlb_reset_dirty(cpu, start1, length); 872 } 873 } 874 875 /* Note: start and end must be within the same ram block. */ 876 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, 877 ram_addr_t length, 878 unsigned client) 879 { 880 DirtyMemoryBlocks *blocks; 881 unsigned long end, page, start_page; 882 bool dirty = false; 883 RAMBlock *ramblock; 884 uint64_t mr_offset, mr_size; 885 886 if (length == 0) { 887 return false; 888 } 889 890 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; 891 start_page = start >> TARGET_PAGE_BITS; 892 page = start_page; 893 894 WITH_RCU_READ_LOCK_GUARD() { 895 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 896 ramblock = qemu_get_ram_block(start); 897 /* Range sanity check on the ramblock */ 898 assert(start >= ramblock->offset && 899 start + length <= ramblock->offset + ramblock->used_length); 900 901 while (page < end) { 902 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 903 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; 904 unsigned long num = MIN(end - page, 905 DIRTY_MEMORY_BLOCK_SIZE - offset); 906 907 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], 908 offset, num); 909 page += num; 910 } 911 912 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; 913 mr_size = (end - start_page) << TARGET_PAGE_BITS; 914 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); 915 } 916 917 if (dirty) { 918 cpu_physical_memory_dirty_bits_cleared(start, length); 919 } 920 921 return dirty; 922 } 923 924 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty 925 (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) 926 { 927 DirtyMemoryBlocks *blocks; 928 ram_addr_t start, first, last; 929 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL); 930 DirtyBitmapSnapshot *snap; 931 unsigned long page, end, dest; 932 933 start = memory_region_get_ram_addr(mr); 934 /* We know we're only called for RAM MemoryRegions */ 935 assert(start != RAM_ADDR_INVALID); 936 start += offset; 937 938 first = QEMU_ALIGN_DOWN(start, align); 939 last = QEMU_ALIGN_UP(start + length, align); 940 941 snap = g_malloc0(sizeof(*snap) + 942 ((last - first) >> (TARGET_PAGE_BITS + 3))); 943 snap->start = first; 944 snap->end = last; 945 946 page = first >> TARGET_PAGE_BITS; 947 end = last >> TARGET_PAGE_BITS; 948 dest = 0; 949 950 WITH_RCU_READ_LOCK_GUARD() { 951 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 952 953 while (page < end) { 954 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 955 unsigned long ofs = page 
% DIRTY_MEMORY_BLOCK_SIZE; 956 unsigned long num = MIN(end - page, 957 DIRTY_MEMORY_BLOCK_SIZE - ofs); 958 959 assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL))); 960 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL))); 961 ofs >>= BITS_PER_LEVEL; 962 963 bitmap_copy_and_clear_atomic(snap->dirty + dest, 964 blocks->blocks[idx] + ofs, 965 num); 966 page += num; 967 dest += num >> BITS_PER_LEVEL; 968 } 969 } 970 971 cpu_physical_memory_dirty_bits_cleared(start, length); 972 973 memory_region_clear_dirty_bitmap(mr, offset, length); 974 975 return snap; 976 } 977 978 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, 979 ram_addr_t start, 980 ram_addr_t length) 981 { 982 unsigned long page, end; 983 984 assert(start >= snap->start); 985 assert(start + length <= snap->end); 986 987 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; 988 page = (start - snap->start) >> TARGET_PAGE_BITS; 989 990 while (page < end) { 991 if (test_bit(page, snap->dirty)) { 992 return true; 993 } 994 page++; 995 } 996 return false; 997 } 998 999 /* Called from RCU critical section */ 1000 hwaddr memory_region_section_get_iotlb(CPUState *cpu, 1001 MemoryRegionSection *section) 1002 { 1003 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); 1004 return section - d->map.sections; 1005 } 1006 1007 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 1008 uint16_t section); 1009 static subpage_t *subpage_init(FlatView *fv, hwaddr base); 1010 1011 static uint16_t phys_section_add(PhysPageMap *map, 1012 MemoryRegionSection *section) 1013 { 1014 /* The physical section number is ORed with a page-aligned 1015 * pointer to produce the iotlb entries. Thus it should 1016 * never overflow into the page-aligned value. 
1017 */ 1018 assert(map->sections_nb < TARGET_PAGE_SIZE); 1019 1020 if (map->sections_nb == map->sections_nb_alloc) { 1021 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); 1022 map->sections = g_renew(MemoryRegionSection, map->sections, 1023 map->sections_nb_alloc); 1024 } 1025 map->sections[map->sections_nb] = *section; 1026 memory_region_ref(section->mr); 1027 return map->sections_nb++; 1028 } 1029 1030 static void phys_section_destroy(MemoryRegion *mr) 1031 { 1032 bool have_sub_page = mr->subpage; 1033 1034 memory_region_unref(mr); 1035 1036 if (have_sub_page) { 1037 subpage_t *subpage = container_of(mr, subpage_t, iomem); 1038 object_unref(OBJECT(&subpage->iomem)); 1039 g_free(subpage); 1040 } 1041 } 1042 1043 static void phys_sections_free(PhysPageMap *map) 1044 { 1045 while (map->sections_nb > 0) { 1046 MemoryRegionSection *section = &map->sections[--map->sections_nb]; 1047 phys_section_destroy(section->mr); 1048 } 1049 g_free(map->sections); 1050 g_free(map->nodes); 1051 } 1052 1053 static void register_subpage(FlatView *fv, MemoryRegionSection *section) 1054 { 1055 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1056 subpage_t *subpage; 1057 hwaddr base = section->offset_within_address_space 1058 & TARGET_PAGE_MASK; 1059 MemoryRegionSection *existing = phys_page_find(d, base); 1060 MemoryRegionSection subsection = { 1061 .offset_within_address_space = base, 1062 .size = int128_make64(TARGET_PAGE_SIZE), 1063 }; 1064 hwaddr start, end; 1065 1066 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); 1067 1068 if (!(existing->mr->subpage)) { 1069 subpage = subpage_init(fv, base); 1070 subsection.fv = fv; 1071 subsection.mr = &subpage->iomem; 1072 phys_page_set(d, base >> TARGET_PAGE_BITS, 1, 1073 phys_section_add(&d->map, &subsection)); 1074 } else { 1075 subpage = container_of(existing->mr, subpage_t, iomem); 1076 } 1077 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; 1078 end = start + int128_get64(section->size) - 1; 1079 subpage_register(subpage, start, end, 1080 phys_section_add(&d->map, section)); 1081 } 1082 1083 1084 static void register_multipage(FlatView *fv, 1085 MemoryRegionSection *section) 1086 { 1087 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1088 hwaddr start_addr = section->offset_within_address_space; 1089 uint16_t section_index = phys_section_add(&d->map, section); 1090 uint64_t num_pages = int128_get64(int128_rshift(section->size, 1091 TARGET_PAGE_BITS)); 1092 1093 assert(num_pages); 1094 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); 1095 } 1096 1097 /* 1098 * The range in *section* may look like this: 1099 * 1100 * |s|PPPPPPP|s| 1101 * 1102 * where s stands for subpage and P for page. 
1103 */ 1104 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section) 1105 { 1106 MemoryRegionSection remain = *section; 1107 Int128 page_size = int128_make64(TARGET_PAGE_SIZE); 1108 1109 /* register first subpage */ 1110 if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { 1111 uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) 1112 - remain.offset_within_address_space; 1113 1114 MemoryRegionSection now = remain; 1115 now.size = int128_min(int128_make64(left), now.size); 1116 register_subpage(fv, &now); 1117 if (int128_eq(remain.size, now.size)) { 1118 return; 1119 } 1120 remain.size = int128_sub(remain.size, now.size); 1121 remain.offset_within_address_space += int128_get64(now.size); 1122 remain.offset_within_region += int128_get64(now.size); 1123 } 1124 1125 /* register whole pages */ 1126 if (int128_ge(remain.size, page_size)) { 1127 MemoryRegionSection now = remain; 1128 now.size = int128_and(now.size, int128_neg(page_size)); 1129 register_multipage(fv, &now); 1130 if (int128_eq(remain.size, now.size)) { 1131 return; 1132 } 1133 remain.size = int128_sub(remain.size, now.size); 1134 remain.offset_within_address_space += int128_get64(now.size); 1135 remain.offset_within_region += int128_get64(now.size); 1136 } 1137 1138 /* register last subpage */ 1139 register_subpage(fv, &remain); 1140 } 1141 1142 void qemu_flush_coalesced_mmio_buffer(void) 1143 { 1144 if (kvm_enabled()) 1145 kvm_flush_coalesced_mmio_buffer(); 1146 } 1147 1148 void qemu_mutex_lock_ramlist(void) 1149 { 1150 qemu_mutex_lock(&ram_list.mutex); 1151 } 1152 1153 void qemu_mutex_unlock_ramlist(void) 1154 { 1155 qemu_mutex_unlock(&ram_list.mutex); 1156 } 1157 1158 GString *ram_block_format(void) 1159 { 1160 RAMBlock *block; 1161 char *psize; 1162 GString *buf = g_string_new(""); 1163 1164 RCU_READ_LOCK_GUARD(); 1165 g_string_append_printf(buf, "%24s %8s %18s %18s %18s %18s %3s\n", 1166 "Block Name", "PSize", "Offset", "Used", "Total", 1167 "HVA", "RO"); 1168 1169 RAMBLOCK_FOREACH(block) { 1170 psize = size_to_str(block->page_size); 1171 g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 1172 " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n", 1173 block->idstr, psize, 1174 (uint64_t)block->offset, 1175 (uint64_t)block->used_length, 1176 (uint64_t)block->max_length, 1177 (uint64_t)(uintptr_t)block->host, 1178 block->mr->readonly ? "ro" : "rw"); 1179 1180 g_free(psize); 1181 } 1182 1183 return buf; 1184 } 1185 1186 static int find_min_backend_pagesize(Object *obj, void *opaque) 1187 { 1188 long *hpsize_min = opaque; 1189 1190 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1191 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1192 long hpsize = host_memory_backend_pagesize(backend); 1193 1194 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { 1195 *hpsize_min = hpsize; 1196 } 1197 } 1198 1199 return 0; 1200 } 1201 1202 static int find_max_backend_pagesize(Object *obj, void *opaque) 1203 { 1204 long *hpsize_max = opaque; 1205 1206 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1207 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1208 long hpsize = host_memory_backend_pagesize(backend); 1209 1210 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { 1211 *hpsize_max = hpsize; 1212 } 1213 } 1214 1215 return 0; 1216 } 1217 1218 /* 1219 * TODO: We assume right now that all mapped host memory backends are 1220 * used as RAM, however some might be used for different purposes. 
1221 */ 1222 long qemu_minrampagesize(void) 1223 { 1224 long hpsize = LONG_MAX; 1225 Object *memdev_root = object_resolve_path("/objects", NULL); 1226 1227 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); 1228 return hpsize; 1229 } 1230 1231 long qemu_maxrampagesize(void) 1232 { 1233 long pagesize = 0; 1234 Object *memdev_root = object_resolve_path("/objects", NULL); 1235 1236 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); 1237 return pagesize; 1238 } 1239 1240 #ifdef CONFIG_POSIX 1241 static int64_t get_file_size(int fd) 1242 { 1243 int64_t size; 1244 #if defined(__linux__) 1245 struct stat st; 1246 1247 if (fstat(fd, &st) < 0) { 1248 return -errno; 1249 } 1250 1251 /* Special handling for devdax character devices */ 1252 if (S_ISCHR(st.st_mode)) { 1253 g_autofree char *subsystem_path = NULL; 1254 g_autofree char *subsystem = NULL; 1255 1256 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", 1257 major(st.st_rdev), minor(st.st_rdev)); 1258 subsystem = g_file_read_link(subsystem_path, NULL); 1259 1260 if (subsystem && g_str_has_suffix(subsystem, "/dax")) { 1261 g_autofree char *size_path = NULL; 1262 g_autofree char *size_str = NULL; 1263 1264 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", 1265 major(st.st_rdev), minor(st.st_rdev)); 1266 1267 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { 1268 return g_ascii_strtoll(size_str, NULL, 0); 1269 } 1270 } 1271 } 1272 #endif /* defined(__linux__) */ 1273 1274 /* st.st_size may be zero for special files yet lseek(2) works */ 1275 size = lseek(fd, 0, SEEK_END); 1276 if (size < 0) { 1277 return -errno; 1278 } 1279 return size; 1280 } 1281 1282 static int64_t get_file_align(int fd) 1283 { 1284 int64_t align = -1; 1285 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) 1286 struct stat st; 1287 1288 if (fstat(fd, &st) < 0) { 1289 return -errno; 1290 } 1291 1292 /* Special handling for devdax character devices */ 1293 if (S_ISCHR(st.st_mode)) { 1294 g_autofree char *path = NULL; 1295 g_autofree char *rpath = NULL; 1296 struct daxctl_ctx *ctx; 1297 struct daxctl_region *region; 1298 int rc = 0; 1299 1300 path = g_strdup_printf("/sys/dev/char/%d:%d", 1301 major(st.st_rdev), minor(st.st_rdev)); 1302 rpath = realpath(path, NULL); 1303 if (!rpath) { 1304 return -errno; 1305 } 1306 1307 rc = daxctl_new(&ctx); 1308 if (rc) { 1309 return -1; 1310 } 1311 1312 daxctl_region_foreach(ctx, region) { 1313 if (strstr(rpath, daxctl_region_get_path(region))) { 1314 align = daxctl_region_get_align(region); 1315 break; 1316 } 1317 } 1318 daxctl_unref(ctx); 1319 } 1320 #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */ 1321 1322 return align; 1323 } 1324 1325 static int file_ram_open(const char *path, 1326 const char *region_name, 1327 bool readonly, 1328 bool *created) 1329 { 1330 char *filename; 1331 char *sanitized_name; 1332 char *c; 1333 int fd = -1; 1334 1335 *created = false; 1336 for (;;) { 1337 fd = open(path, readonly ? O_RDONLY : O_RDWR); 1338 if (fd >= 0) { 1339 /* 1340 * open(O_RDONLY) won't fail with EISDIR. Check manually if we 1341 * opened a directory and fail similarly to how we fail ENOENT 1342 * in readonly mode. Note that mkstemp() would imply O_RDWR. 
1343 */ 1344 if (readonly) { 1345 struct stat file_stat; 1346 1347 if (fstat(fd, &file_stat)) { 1348 close(fd); 1349 if (errno == EINTR) { 1350 continue; 1351 } 1352 return -errno; 1353 } else if (S_ISDIR(file_stat.st_mode)) { 1354 close(fd); 1355 return -EISDIR; 1356 } 1357 } 1358 /* @path names an existing file, use it */ 1359 break; 1360 } 1361 if (errno == ENOENT) { 1362 if (readonly) { 1363 /* Refuse to create new, readonly files. */ 1364 return -ENOENT; 1365 } 1366 /* @path names a file that doesn't exist, create it */ 1367 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); 1368 if (fd >= 0) { 1369 *created = true; 1370 break; 1371 } 1372 } else if (errno == EISDIR) { 1373 /* @path names a directory, create a file there */ 1374 /* Make name safe to use with mkstemp by replacing '/' with '_'. */ 1375 sanitized_name = g_strdup(region_name); 1376 for (c = sanitized_name; *c != '\0'; c++) { 1377 if (*c == '/') { 1378 *c = '_'; 1379 } 1380 } 1381 1382 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, 1383 sanitized_name); 1384 g_free(sanitized_name); 1385 1386 fd = mkstemp(filename); 1387 if (fd >= 0) { 1388 unlink(filename); 1389 g_free(filename); 1390 break; 1391 } 1392 g_free(filename); 1393 } 1394 if (errno != EEXIST && errno != EINTR) { 1395 return -errno; 1396 } 1397 /* 1398 * Try again on EINTR and EEXIST. The latter happens when 1399 * something else creates the file between our two open(). 1400 */ 1401 } 1402 1403 return fd; 1404 } 1405 1406 static void *file_ram_alloc(RAMBlock *block, 1407 ram_addr_t memory, 1408 int fd, 1409 bool truncate, 1410 off_t offset, 1411 Error **errp) 1412 { 1413 uint32_t qemu_map_flags; 1414 void *area; 1415 1416 block->page_size = qemu_fd_getpagesize(fd); 1417 if (block->mr->align % block->page_size) { 1418 error_setg(errp, "alignment 0x%" PRIx64 1419 " must be multiples of page size 0x%zx", 1420 block->mr->align, block->page_size); 1421 return NULL; 1422 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { 1423 error_setg(errp, "alignment 0x%" PRIx64 1424 " must be a power of two", block->mr->align); 1425 return NULL; 1426 } else if (offset % block->page_size) { 1427 error_setg(errp, "offset 0x%" PRIx64 1428 " must be multiples of page size 0x%zx", 1429 offset, block->page_size); 1430 return NULL; 1431 } 1432 block->mr->align = MAX(block->page_size, block->mr->align); 1433 #if defined(__s390x__) 1434 if (kvm_enabled()) { 1435 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); 1436 } 1437 #endif 1438 1439 if (memory < block->page_size) { 1440 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " 1441 "or larger than page size 0x%zx", 1442 memory, block->page_size); 1443 return NULL; 1444 } 1445 1446 memory = ROUND_UP(memory, block->page_size); 1447 1448 /* 1449 * ftruncate is not supported by hugetlbfs in older 1450 * hosts, so don't bother bailing out on errors. 1451 * If anything goes wrong with it under other filesystems, 1452 * mmap will fail. 1453 * 1454 * Do not truncate the non-empty backend file to avoid corrupting 1455 * the existing data in the file. Disabling shrinking is not 1456 * enough. For example, the current vNVDIMM implementation stores 1457 * the guest NVDIMM labels at the end of the backend file. If the 1458 * backend file is later extended, QEMU will not be able to find 1459 * those labels. Therefore, extending the non-empty backend file 1460 * is disabled as well. 
1461 */ 1462 if (truncate && ftruncate(fd, offset + memory)) { 1463 perror("ftruncate"); 1464 } 1465 1466 qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0; 1467 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; 1468 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; 1469 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; 1470 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); 1471 if (area == MAP_FAILED) { 1472 error_setg_errno(errp, errno, 1473 "unable to map backing store for guest RAM"); 1474 return NULL; 1475 } 1476 1477 block->fd = fd; 1478 block->fd_offset = offset; 1479 return area; 1480 } 1481 #endif 1482 1483 /* Allocate space within the ram_addr_t space that governs the 1484 * dirty bitmaps. 1485 * Called with the ramlist lock held. 1486 */ 1487 static ram_addr_t find_ram_offset(ram_addr_t size) 1488 { 1489 RAMBlock *block, *next_block; 1490 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; 1491 1492 assert(size != 0); /* it would hand out same offset multiple times */ 1493 1494 if (QLIST_EMPTY_RCU(&ram_list.blocks)) { 1495 return 0; 1496 } 1497 1498 RAMBLOCK_FOREACH(block) { 1499 ram_addr_t candidate, next = RAM_ADDR_MAX; 1500 1501 /* Align blocks to start on a 'long' in the bitmap 1502 * which makes the bitmap sync'ing take the fast path. 1503 */ 1504 candidate = block->offset + block->max_length; 1505 candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); 1506 1507 /* Search for the closest following block 1508 * and find the gap. 1509 */ 1510 RAMBLOCK_FOREACH(next_block) { 1511 if (next_block->offset >= candidate) { 1512 next = MIN(next, next_block->offset); 1513 } 1514 } 1515 1516 /* If it fits remember our place and remember the size 1517 * of gap, but keep going so that we might find a smaller 1518 * gap to fill so avoiding fragmentation. 
1519 */ 1520 if (next - candidate >= size && next - candidate < mingap) { 1521 offset = candidate; 1522 mingap = next - candidate; 1523 } 1524 1525 trace_find_ram_offset_loop(size, candidate, offset, next, mingap); 1526 } 1527 1528 if (offset == RAM_ADDR_MAX) { 1529 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", 1530 (uint64_t)size); 1531 abort(); 1532 } 1533 1534 trace_find_ram_offset(size, offset); 1535 1536 return offset; 1537 } 1538 1539 static void qemu_ram_setup_dump(void *addr, ram_addr_t size) 1540 { 1541 int ret; 1542 1543 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ 1544 if (!machine_dump_guest_core(current_machine)) { 1545 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); 1546 if (ret) { 1547 perror("qemu_madvise"); 1548 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " 1549 "but dump-guest-core=off specified\n"); 1550 } 1551 } 1552 } 1553 1554 const char *qemu_ram_get_idstr(RAMBlock *rb) 1555 { 1556 return rb->idstr; 1557 } 1558 1559 void *qemu_ram_get_host_addr(RAMBlock *rb) 1560 { 1561 return rb->host; 1562 } 1563 1564 ram_addr_t qemu_ram_get_offset(RAMBlock *rb) 1565 { 1566 return rb->offset; 1567 } 1568 1569 ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) 1570 { 1571 return rb->used_length; 1572 } 1573 1574 ram_addr_t qemu_ram_get_max_length(RAMBlock *rb) 1575 { 1576 return rb->max_length; 1577 } 1578 1579 bool qemu_ram_is_shared(RAMBlock *rb) 1580 { 1581 return rb->flags & RAM_SHARED; 1582 } 1583 1584 bool qemu_ram_is_noreserve(RAMBlock *rb) 1585 { 1586 return rb->flags & RAM_NORESERVE; 1587 } 1588 1589 /* Note: Only set at the start of postcopy */ 1590 bool qemu_ram_is_uf_zeroable(RAMBlock *rb) 1591 { 1592 return rb->flags & RAM_UF_ZEROPAGE; 1593 } 1594 1595 void qemu_ram_set_uf_zeroable(RAMBlock *rb) 1596 { 1597 rb->flags |= RAM_UF_ZEROPAGE; 1598 } 1599 1600 bool qemu_ram_is_migratable(RAMBlock *rb) 1601 { 1602 return rb->flags & RAM_MIGRATABLE; 1603 } 1604 1605 void qemu_ram_set_migratable(RAMBlock *rb) 1606 { 1607 rb->flags |= RAM_MIGRATABLE; 1608 } 1609 1610 void qemu_ram_unset_migratable(RAMBlock *rb) 1611 { 1612 rb->flags &= ~RAM_MIGRATABLE; 1613 } 1614 1615 bool qemu_ram_is_named_file(RAMBlock *rb) 1616 { 1617 return rb->flags & RAM_NAMED_FILE; 1618 } 1619 1620 int qemu_ram_get_fd(RAMBlock *rb) 1621 { 1622 return rb->fd; 1623 } 1624 1625 /* Called with the BQL held. */ 1626 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) 1627 { 1628 RAMBlock *block; 1629 1630 assert(new_block); 1631 assert(!new_block->idstr[0]); 1632 1633 if (dev) { 1634 char *id = qdev_get_dev_path(dev); 1635 if (id) { 1636 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 1637 g_free(id); 1638 } 1639 } 1640 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 1641 1642 RCU_READ_LOCK_GUARD(); 1643 RAMBLOCK_FOREACH(block) { 1644 if (block != new_block && 1645 !strcmp(block->idstr, new_block->idstr)) { 1646 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 1647 new_block->idstr); 1648 abort(); 1649 } 1650 } 1651 } 1652 1653 /* Called with the BQL held. */ 1654 void qemu_ram_unset_idstr(RAMBlock *block) 1655 { 1656 /* FIXME: arch_init.c assumes that this is not called throughout 1657 * migration. Ignore the problem since hot-unplug during migration 1658 * does not work anyway. 
1659 */ 1660 if (block) { 1661 memset(block->idstr, 0, sizeof(block->idstr)); 1662 } 1663 } 1664 1665 static char *cpr_name(MemoryRegion *mr) 1666 { 1667 const char *mr_name = memory_region_name(mr); 1668 g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; 1669 1670 if (id) { 1671 return g_strdup_printf("%s/%s", id, mr_name); 1672 } else { 1673 return g_strdup(mr_name); 1674 } 1675 } 1676 1677 size_t qemu_ram_pagesize(RAMBlock *rb) 1678 { 1679 return rb->page_size; 1680 } 1681 1682 /* Returns the largest size of page in use */ 1683 size_t qemu_ram_pagesize_largest(void) 1684 { 1685 RAMBlock *block; 1686 size_t largest = 0; 1687 1688 RAMBLOCK_FOREACH(block) { 1689 largest = MAX(largest, qemu_ram_pagesize(block)); 1690 } 1691 1692 return largest; 1693 } 1694 1695 static int memory_try_enable_merging(void *addr, size_t len) 1696 { 1697 if (!machine_mem_merge(current_machine)) { 1698 /* disabled by the user */ 1699 return 0; 1700 } 1701 1702 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); 1703 } 1704 1705 /* 1706 * Resizing RAM while migrating can result in the migration being canceled. 1707 * Care has to be taken if the guest might have already detected the memory. 1708 * 1709 * As memory core doesn't know how is memory accessed, it is up to 1710 * resize callback to update device state and/or add assertions to detect 1711 * misuse, if necessary. 1712 */ 1713 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) 1714 { 1715 const ram_addr_t oldsize = block->used_length; 1716 const ram_addr_t unaligned_size = newsize; 1717 1718 assert(block); 1719 1720 newsize = TARGET_PAGE_ALIGN(newsize); 1721 newsize = REAL_HOST_PAGE_ALIGN(newsize); 1722 1723 if (block->used_length == newsize) { 1724 /* 1725 * We don't have to resize the ram block (which only knows aligned 1726 * sizes), however, we have to notify if the unaligned size changed. 1727 */ 1728 if (unaligned_size != memory_region_size(block->mr)) { 1729 memory_region_set_size(block->mr, unaligned_size); 1730 if (block->resized) { 1731 block->resized(block->idstr, unaligned_size, block->host); 1732 } 1733 } 1734 return 0; 1735 } 1736 1737 if (!(block->flags & RAM_RESIZEABLE)) { 1738 error_setg_errno(errp, EINVAL, 1739 "Size mismatch: %s: 0x" RAM_ADDR_FMT 1740 " != 0x" RAM_ADDR_FMT, block->idstr, 1741 newsize, block->used_length); 1742 return -EINVAL; 1743 } 1744 1745 if (block->max_length < newsize) { 1746 error_setg_errno(errp, EINVAL, 1747 "Size too large: %s: 0x" RAM_ADDR_FMT 1748 " > 0x" RAM_ADDR_FMT, block->idstr, 1749 newsize, block->max_length); 1750 return -EINVAL; 1751 } 1752 1753 /* Notify before modifying the ram block and touching the bitmaps. */ 1754 if (block->host) { 1755 ram_block_notify_resize(block->host, oldsize, newsize); 1756 } 1757 1758 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); 1759 block->used_length = newsize; 1760 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, 1761 DIRTY_CLIENTS_ALL); 1762 memory_region_set_size(block->mr, unaligned_size); 1763 if (block->resized) { 1764 block->resized(block->idstr, unaligned_size, block->host); 1765 } 1766 return 0; 1767 } 1768 1769 /* 1770 * Trigger sync on the given ram block for range [start, start + length] 1771 * with the backing store if one is available. 1772 * Otherwise no-op. 1773 * @Note: this is supposed to be a synchronous op. 
1774 */ 1775 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) 1776 { 1777 /* The requested range should fit in within the block range */ 1778 g_assert((start + length) <= block->used_length); 1779 1780 #ifdef CONFIG_LIBPMEM 1781 /* The lack of support for pmem should not block the sync */ 1782 if (ramblock_is_pmem(block)) { 1783 void *addr = ramblock_ptr(block, start); 1784 pmem_persist(addr, length); 1785 return; 1786 } 1787 #endif 1788 if (block->fd >= 0) { 1789 /** 1790 * Case there is no support for PMEM or the memory has not been 1791 * specified as persistent (or is not one) - use the msync. 1792 * Less optimal but still achieves the same goal 1793 */ 1794 void *addr = ramblock_ptr(block, start); 1795 if (qemu_msync(addr, length, block->fd)) { 1796 warn_report("%s: failed to sync memory range: start: " 1797 RAM_ADDR_FMT " length: " RAM_ADDR_FMT, 1798 __func__, start, length); 1799 } 1800 } 1801 } 1802 1803 /* Called with ram_list.mutex held */ 1804 static void dirty_memory_extend(ram_addr_t new_ram_size) 1805 { 1806 unsigned int old_num_blocks = ram_list.num_dirty_blocks; 1807 unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size, 1808 DIRTY_MEMORY_BLOCK_SIZE); 1809 int i; 1810 1811 /* Only need to extend if block count increased */ 1812 if (new_num_blocks <= old_num_blocks) { 1813 return; 1814 } 1815 1816 for (i = 0; i < DIRTY_MEMORY_NUM; i++) { 1817 DirtyMemoryBlocks *old_blocks; 1818 DirtyMemoryBlocks *new_blocks; 1819 int j; 1820 1821 old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]); 1822 new_blocks = g_malloc(sizeof(*new_blocks) + 1823 sizeof(new_blocks->blocks[0]) * new_num_blocks); 1824 1825 if (old_num_blocks) { 1826 memcpy(new_blocks->blocks, old_blocks->blocks, 1827 old_num_blocks * sizeof(old_blocks->blocks[0])); 1828 } 1829 1830 for (j = old_num_blocks; j < new_num_blocks; j++) { 1831 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); 1832 } 1833 1834 qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); 1835 1836 if (old_blocks) { 1837 g_free_rcu(old_blocks, rcu); 1838 } 1839 } 1840 1841 ram_list.num_dirty_blocks = new_num_blocks; 1842 } 1843 1844 static void ram_block_add(RAMBlock *new_block, Error **errp) 1845 { 1846 const bool noreserve = qemu_ram_is_noreserve(new_block); 1847 const bool shared = qemu_ram_is_shared(new_block); 1848 RAMBlock *block; 1849 RAMBlock *last_block = NULL; 1850 bool free_on_error = false; 1851 ram_addr_t ram_size; 1852 Error *err = NULL; 1853 1854 qemu_mutex_lock_ramlist(); 1855 new_block->offset = find_ram_offset(new_block->max_length); 1856 1857 if (!new_block->host) { 1858 if (xen_enabled()) { 1859 xen_ram_alloc(new_block->offset, new_block->max_length, 1860 new_block->mr, &err); 1861 if (err) { 1862 error_propagate(errp, err); 1863 qemu_mutex_unlock_ramlist(); 1864 return; 1865 } 1866 } else { 1867 new_block->host = qemu_anon_ram_alloc(new_block->max_length, 1868 &new_block->mr->align, 1869 shared, noreserve); 1870 if (!new_block->host) { 1871 error_setg_errno(errp, errno, 1872 "cannot set up guest memory '%s'", 1873 memory_region_name(new_block->mr)); 1874 qemu_mutex_unlock_ramlist(); 1875 return; 1876 } 1877 memory_try_enable_merging(new_block->host, new_block->max_length); 1878 free_on_error = true; 1879 } 1880 } 1881 1882 if (new_block->flags & RAM_GUEST_MEMFD) { 1883 int ret; 1884 1885 assert(kvm_enabled()); 1886 assert(new_block->guest_memfd < 0); 1887 1888 ret = ram_block_discard_require(true); 1889 if (ret < 0) { 1890 error_setg_errno(errp, -ret, 1891 "cannot set up private guest 
memory: discard currently blocked"); 1892 error_append_hint(errp, "Are you using assigned devices?\n"); 1893 goto out_free; 1894 } 1895 1896 new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length, 1897 0, errp); 1898 if (new_block->guest_memfd < 0) { 1899 qemu_mutex_unlock_ramlist(); 1900 goto out_free; 1901 } 1902 } 1903 1904 ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; 1905 dirty_memory_extend(ram_size); 1906 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, 1907 * QLIST (which has an RCU-friendly variant) does not have insertion at 1908 * tail, so save the last element in last_block. 1909 */ 1910 RAMBLOCK_FOREACH(block) { 1911 last_block = block; 1912 if (block->max_length < new_block->max_length) { 1913 break; 1914 } 1915 } 1916 if (block) { 1917 QLIST_INSERT_BEFORE_RCU(block, new_block, next); 1918 } else if (last_block) { 1919 QLIST_INSERT_AFTER_RCU(last_block, new_block, next); 1920 } else { /* list is empty */ 1921 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); 1922 } 1923 ram_list.mru_block = NULL; 1924 1925 /* Write list before version */ 1926 smp_wmb(); 1927 ram_list.version++; 1928 qemu_mutex_unlock_ramlist(); 1929 1930 cpu_physical_memory_set_dirty_range(new_block->offset, 1931 new_block->used_length, 1932 DIRTY_CLIENTS_ALL); 1933 1934 if (new_block->host) { 1935 qemu_ram_setup_dump(new_block->host, new_block->max_length); 1936 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); 1937 /* 1938 * MADV_DONTFORK is also needed by KVM in absence of synchronous MMU 1939 * Configure it unless the machine is a qtest server, in which case 1940 * KVM is not used and it may be forked (eg for fuzzing purposes). 1941 */ 1942 if (!qtest_enabled()) { 1943 qemu_madvise(new_block->host, new_block->max_length, 1944 QEMU_MADV_DONTFORK); 1945 } 1946 ram_block_notify_add(new_block->host, new_block->used_length, 1947 new_block->max_length); 1948 } 1949 return; 1950 1951 out_free: 1952 if (free_on_error) { 1953 qemu_anon_ram_free(new_block->host, new_block->max_length); 1954 new_block->host = NULL; 1955 } 1956 } 1957 1958 #ifdef CONFIG_POSIX 1959 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size, 1960 qemu_ram_resize_cb resized, MemoryRegion *mr, 1961 uint32_t ram_flags, int fd, off_t offset, 1962 bool grow, 1963 Error **errp) 1964 { 1965 ERRP_GUARD(); 1966 RAMBlock *new_block; 1967 Error *local_err = NULL; 1968 int64_t file_size, file_align, share_flags; 1969 1970 share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); 1971 assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); 1972 ram_flags &= ~RAM_PRIVATE; 1973 1974 /* Just support these ram flags by now. 
*/ 1975 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | 1976 RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY | 1977 RAM_READONLY_FD | RAM_GUEST_MEMFD | 1978 RAM_RESIZEABLE)) == 0); 1979 assert(max_size >= size); 1980 1981 if (xen_enabled()) { 1982 error_setg(errp, "-mem-path not supported with Xen"); 1983 return NULL; 1984 } 1985 1986 if (kvm_enabled() && !kvm_has_sync_mmu()) { 1987 error_setg(errp, 1988 "host lacks kvm mmu notifiers, -mem-path unsupported"); 1989 return NULL; 1990 } 1991 1992 size = TARGET_PAGE_ALIGN(size); 1993 size = REAL_HOST_PAGE_ALIGN(size); 1994 max_size = TARGET_PAGE_ALIGN(max_size); 1995 max_size = REAL_HOST_PAGE_ALIGN(max_size); 1996 1997 file_size = get_file_size(fd); 1998 if (file_size && file_size < offset + max_size && !grow) { 1999 error_setg(errp, "%s backing store size 0x%" PRIx64 2000 " is too small for 'size' option 0x" RAM_ADDR_FMT 2001 " plus 'offset' option 0x%" PRIx64, 2002 memory_region_name(mr), file_size, max_size, 2003 (uint64_t)offset); 2004 return NULL; 2005 } 2006 2007 file_align = get_file_align(fd); 2008 if (file_align > 0 && file_align > mr->align) { 2009 error_setg(errp, "backing store align 0x%" PRIx64 2010 " is larger than 'align' option 0x%" PRIx64, 2011 file_align, mr->align); 2012 return NULL; 2013 } 2014 2015 new_block = g_malloc0(sizeof(*new_block)); 2016 new_block->mr = mr; 2017 new_block->used_length = size; 2018 new_block->max_length = max_size; 2019 new_block->resized = resized; 2020 new_block->flags = ram_flags; 2021 new_block->guest_memfd = -1; 2022 new_block->host = file_ram_alloc(new_block, max_size, fd, 2023 file_size < offset + max_size, 2024 offset, errp); 2025 if (!new_block->host) { 2026 g_free(new_block); 2027 return NULL; 2028 } 2029 2030 ram_block_add(new_block, &local_err); 2031 if (local_err) { 2032 g_free(new_block); 2033 error_propagate(errp, local_err); 2034 return NULL; 2035 } 2036 return new_block; 2037 2038 } 2039 2040 2041 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 2042 uint32_t ram_flags, const char *mem_path, 2043 off_t offset, Error **errp) 2044 { 2045 int fd; 2046 bool created; 2047 RAMBlock *block; 2048 2049 fd = file_ram_open(mem_path, memory_region_name(mr), 2050 !!(ram_flags & RAM_READONLY_FD), &created); 2051 if (fd < 0) { 2052 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", 2053 mem_path); 2054 if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) && 2055 fd == -EACCES) { 2056 /* 2057 * If we can open the file R/O (note: will never create a new file) 2058 * and we are dealing with a private mapping, there are still ways 2059 * to consume such files and get RAM instead of ROM. 
2060 */ 2061 fd = file_ram_open(mem_path, memory_region_name(mr), true, 2062 &created); 2063 if (fd < 0) { 2064 return NULL; 2065 } 2066 assert(!created); 2067 close(fd); 2068 error_append_hint(errp, "Consider opening the backing store" 2069 " read-only but still creating writable RAM using" 2070 " '-object memory-backend-file,readonly=on,rom=off...'" 2071 " (see \"VM templating\" documentation)\n"); 2072 } 2073 return NULL; 2074 } 2075 2076 block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset, 2077 false, errp); 2078 if (!block) { 2079 if (created) { 2080 unlink(mem_path); 2081 } 2082 close(fd); 2083 return NULL; 2084 } 2085 2086 return block; 2087 } 2088 #endif 2089 2090 #ifdef CONFIG_POSIX 2091 /* 2092 * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so it can be 2093 * shared with another process if CPR is being used. Use memfd if available 2094 * because it has no size limits, else use POSIX shm. 2095 */ 2096 static int qemu_ram_get_shared_fd(const char *name, bool *reused, Error **errp) 2097 { 2098 int fd = cpr_find_fd(name, 0); 2099 2100 if (fd >= 0) { 2101 *reused = true; 2102 return fd; 2103 } 2104 2105 if (qemu_memfd_check(0)) { 2106 fd = qemu_memfd_create(name, 0, 0, 0, 0, errp); 2107 } else { 2108 fd = qemu_shm_alloc(0, errp); 2109 } 2110 2111 if (fd >= 0) { 2112 cpr_save_fd(name, 0, fd); 2113 } 2114 *reused = false; 2115 return fd; 2116 } 2117 #endif 2118 2119 static 2120 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 2121 qemu_ram_resize_cb resized, 2122 void *host, uint32_t ram_flags, 2123 MemoryRegion *mr, Error **errp) 2124 { 2125 RAMBlock *new_block; 2126 Error *local_err = NULL; 2127 int align, share_flags; 2128 2129 share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); 2130 assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); 2131 ram_flags &= ~RAM_PRIVATE; 2132 2133 assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | 2134 RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2135 assert(!host ^ (ram_flags & RAM_PREALLOC)); 2136 assert(max_size >= size); 2137 2138 #ifdef CONFIG_POSIX /* ignore RAM_SHARED for Windows */ 2139 if (!host) { 2140 if (!share_flags && current_machine->aux_ram_share) { 2141 ram_flags |= RAM_SHARED; 2142 } 2143 if (ram_flags & RAM_SHARED) { 2144 bool reused; 2145 g_autofree char *name = cpr_name(mr); 2146 int fd = qemu_ram_get_shared_fd(name, &reused, errp); 2147 2148 if (fd < 0) { 2149 return NULL; 2150 } 2151 2152 /* Use same alignment as qemu_anon_ram_alloc */ 2153 mr->align = QEMU_VMALLOC_ALIGN; 2154 2155 /* 2156 * This can fail if the shm mount size is too small, or alloc from 2157 * fd is not supported, but previous QEMU versions that called 2158 * qemu_anon_ram_alloc for anonymous shared memory could have 2159 * succeeded. Quietly fail and fall back. 2160 * 2161 * After cpr-transfer, new QEMU could create a memory region 2162 * with a larger max size than old, so pass reused to grow the 2163 * region if necessary. The extra space will be usable after a 2164 * guest reset. 
2165 */ 2166 new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr, 2167 ram_flags, fd, 0, reused, NULL); 2168 if (new_block) { 2169 trace_qemu_ram_alloc_shared(name, new_block->used_length, 2170 new_block->max_length, fd, 2171 new_block->host); 2172 return new_block; 2173 } 2174 2175 cpr_delete_fd(name, 0); 2176 close(fd); 2177 /* fall back to anon allocation */ 2178 } 2179 } 2180 #endif 2181 2182 align = qemu_real_host_page_size(); 2183 align = MAX(align, TARGET_PAGE_SIZE); 2184 size = ROUND_UP(size, align); 2185 max_size = ROUND_UP(max_size, align); 2186 2187 new_block = g_malloc0(sizeof(*new_block)); 2188 new_block->mr = mr; 2189 new_block->resized = resized; 2190 new_block->used_length = size; 2191 new_block->max_length = max_size; 2192 new_block->fd = -1; 2193 new_block->guest_memfd = -1; 2194 new_block->page_size = qemu_real_host_page_size(); 2195 new_block->host = host; 2196 new_block->flags = ram_flags; 2197 ram_block_add(new_block, &local_err); 2198 if (local_err) { 2199 g_free(new_block); 2200 error_propagate(errp, local_err); 2201 return NULL; 2202 } 2203 return new_block; 2204 } 2205 2206 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2207 MemoryRegion *mr, Error **errp) 2208 { 2209 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2210 errp); 2211 } 2212 2213 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2214 MemoryRegion *mr, Error **errp) 2215 { 2216 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD | 2217 RAM_PRIVATE)) == 0); 2218 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2219 } 2220 2221 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2222 qemu_ram_resize_cb resized, 2223 MemoryRegion *mr, Error **errp) 2224 { 2225 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2226 RAM_RESIZEABLE, mr, errp); 2227 } 2228 2229 static void reclaim_ramblock(RAMBlock *block) 2230 { 2231 if (block->flags & RAM_PREALLOC) { 2232 ; 2233 } else if (xen_enabled()) { 2234 xen_invalidate_map_cache_entry(block->host); 2235 #ifndef _WIN32 2236 } else if (block->fd >= 0) { 2237 qemu_ram_munmap(block->fd, block->host, block->max_length); 2238 close(block->fd); 2239 #endif 2240 } else { 2241 qemu_anon_ram_free(block->host, block->max_length); 2242 } 2243 2244 if (block->guest_memfd >= 0) { 2245 close(block->guest_memfd); 2246 ram_block_discard_require(false); 2247 } 2248 2249 g_free(block); 2250 } 2251 2252 void qemu_ram_free(RAMBlock *block) 2253 { 2254 g_autofree char *name = NULL; 2255 2256 if (!block) { 2257 return; 2258 } 2259 2260 if (block->host) { 2261 ram_block_notify_remove(block->host, block->used_length, 2262 block->max_length); 2263 } 2264 2265 qemu_mutex_lock_ramlist(); 2266 name = cpr_name(block->mr); 2267 cpr_delete_fd(name, 0); 2268 QLIST_REMOVE_RCU(block, next); 2269 ram_list.mru_block = NULL; 2270 /* Write list before version */ 2271 smp_wmb(); 2272 ram_list.version++; 2273 call_rcu(block, reclaim_ramblock, rcu); 2274 qemu_mutex_unlock_ramlist(); 2275 } 2276 2277 #ifndef _WIN32 2278 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 2279 { 2280 RAMBlock *block; 2281 ram_addr_t offset; 2282 int flags; 2283 void *area, *vaddr; 2284 int prot; 2285 2286 RAMBLOCK_FOREACH(block) { 2287 offset = addr - block->offset; 2288 if (offset < block->max_length) { 2289 vaddr = ramblock_ptr(block, offset); 2290 if (block->flags & RAM_PREALLOC) { 2291 ; 2292 } else if (xen_enabled()) { 2293 abort(); 2294 } else { 2295 flags = MAP_FIXED; 2296 flags |= 
block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE;
                flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
                prot = PROT_READ;
                prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
                if (block->fd >= 0) {
                    area = mmap(vaddr, length, prot, flags, block->fd,
                                offset + block->fd_offset);
                } else {
                    flags |= MAP_ANONYMOUS;
                    area = mmap(vaddr, length, prot, flags, -1, 0);
                }
                if (area != vaddr) {
                    error_report("Could not remap addr: "
                                 RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
                                 length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

/*
 * Return a host pointer to guest's RAM.
 * For Xen, foreign mappings get created if they don't already exist.
 *
 * @block: block for the RAM to lookup (optional and may be NULL).
 * @addr: address within the memory region.
 * @size: pointer to requested size (optional and may be NULL).
 *        size may get modified and return a value smaller than
 *        what was requested.
 * @lock: whether to lock the mapping in xen-mapcache until invalidated.
 * @is_write: hint whether to map RW or RO in the xen-mapcache.
 *            (optional and may always be set to true).
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
                                 hwaddr *size, bool lock,
                                 bool is_write)
{
    hwaddr len = 0;

    if (size && *size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    if (size) {
        *size = MIN(*size, block->max_length - addr);
        len = *size;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (xen_mr_is_memory(block->mr)) {
            return xen_map_cache(block->mr, block->offset + addr,
                                 len, block->offset,
                                 lock, lock, is_write);
        }

        block->host = xen_map_cache(block->mr, block->offset,
                                    block->max_length,
                                    block->offset,
                                    1, lock, is_write);
    }

    return ramblock_ptr(block, addr);
}

/*
 * Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
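 *
 * A hedged illustration (the helper name my_dma_write() is hypothetical and
 * not part of this file): for general purpose DMA, a device model would go
 * through the address_space API instead of touching guest RAM pointers
 * directly, e.g.:
 *
 *     static MemTxResult my_dma_write(AddressSpace *as, hwaddr gpa,
 *                                     const void *data, hwaddr len)
 *     {
 *         return address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED,
 *                                    data, len);
 *     }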
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    return qemu_ram_ptr_length(ram_block, addr, NULL, false, true);
}

/* Return the offset of a host pointer within a RAMBlock */
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
{
    ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
    assert((uintptr_t)host >= (uintptr_t)rb->host);
    assert(res < rb->max_length);

    return res;
}

RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        RCU_READ_LOCK_GUARD();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        if (ram_addr == RAM_ADDR_INVALID) {
            return NULL;
        }

        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        return block;
    }

    RCU_READ_LOCK_GUARD();
    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    return block;
}

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/*
 * Some of the system routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
2469 */ 2470 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2471 { 2472 RAMBlock *block; 2473 ram_addr_t offset; 2474 2475 block = qemu_ram_block_from_host(ptr, false, &offset); 2476 if (!block) { 2477 return RAM_ADDR_INVALID; 2478 } 2479 2480 return block->offset + offset; 2481 } 2482 2483 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 2484 { 2485 ram_addr_t ram_addr; 2486 2487 ram_addr = qemu_ram_addr_from_host(ptr); 2488 if (ram_addr == RAM_ADDR_INVALID) { 2489 error_report("Bad ram pointer %p", ptr); 2490 abort(); 2491 } 2492 return ram_addr; 2493 } 2494 2495 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2496 MemTxAttrs attrs, void *buf, hwaddr len); 2497 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2498 const void *buf, hwaddr len); 2499 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2500 bool is_write, MemTxAttrs attrs); 2501 2502 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2503 unsigned len, MemTxAttrs attrs) 2504 { 2505 subpage_t *subpage = opaque; 2506 uint8_t buf[8]; 2507 MemTxResult res; 2508 2509 #if defined(DEBUG_SUBPAGE) 2510 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__, 2511 subpage, len, addr); 2512 #endif 2513 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2514 if (res) { 2515 return res; 2516 } 2517 *data = ldn_p(buf, len); 2518 return MEMTX_OK; 2519 } 2520 2521 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2522 uint64_t value, unsigned len, MemTxAttrs attrs) 2523 { 2524 subpage_t *subpage = opaque; 2525 uint8_t buf[8]; 2526 2527 #if defined(DEBUG_SUBPAGE) 2528 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx 2529 " value %"PRIx64"\n", 2530 __func__, subpage, len, addr, value); 2531 #endif 2532 stn_p(buf, len, value); 2533 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2534 } 2535 2536 static bool subpage_accepts(void *opaque, hwaddr addr, 2537 unsigned len, bool is_write, 2538 MemTxAttrs attrs) 2539 { 2540 subpage_t *subpage = opaque; 2541 #if defined(DEBUG_SUBPAGE) 2542 printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n", 2543 __func__, subpage, is_write ? 
'w' : 'r', len, addr); 2544 #endif 2545 2546 return flatview_access_valid(subpage->fv, addr + subpage->base, 2547 len, is_write, attrs); 2548 } 2549 2550 static const MemoryRegionOps subpage_ops = { 2551 .read_with_attrs = subpage_read, 2552 .write_with_attrs = subpage_write, 2553 .impl.min_access_size = 1, 2554 .impl.max_access_size = 8, 2555 .valid.min_access_size = 1, 2556 .valid.max_access_size = 8, 2557 .valid.accepts = subpage_accepts, 2558 .endianness = DEVICE_NATIVE_ENDIAN, 2559 }; 2560 2561 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 2562 uint16_t section) 2563 { 2564 int idx, eidx; 2565 2566 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2567 return -1; 2568 idx = SUBPAGE_IDX(start); 2569 eidx = SUBPAGE_IDX(end); 2570 #if defined(DEBUG_SUBPAGE) 2571 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2572 __func__, mmio, start, end, idx, eidx, section); 2573 #endif 2574 for (; idx <= eidx; idx++) { 2575 mmio->sub_section[idx] = section; 2576 } 2577 2578 return 0; 2579 } 2580 2581 static subpage_t *subpage_init(FlatView *fv, hwaddr base) 2582 { 2583 subpage_t *mmio; 2584 2585 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ 2586 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); 2587 mmio->fv = fv; 2588 mmio->base = base; 2589 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2590 NULL, TARGET_PAGE_SIZE); 2591 mmio->iomem.subpage = true; 2592 #if defined(DEBUG_SUBPAGE) 2593 printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__, 2594 mmio, base, TARGET_PAGE_SIZE); 2595 #endif 2596 2597 return mmio; 2598 } 2599 2600 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) 2601 { 2602 assert(fv); 2603 MemoryRegionSection section = { 2604 .fv = fv, 2605 .mr = mr, 2606 .offset_within_address_space = 0, 2607 .offset_within_region = 0, 2608 .size = int128_2_64(), 2609 }; 2610 2611 return phys_section_add(map, §ion); 2612 } 2613 2614 MemoryRegionSection *iotlb_to_section(CPUState *cpu, 2615 hwaddr index, MemTxAttrs attrs) 2616 { 2617 int asidx = cpu_asidx_from_attrs(cpu, attrs); 2618 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 2619 AddressSpaceDispatch *d = cpuas->memory_dispatch; 2620 int section_index = index & ~TARGET_PAGE_MASK; 2621 MemoryRegionSection *ret; 2622 2623 assert(section_index < d->map.sections_nb); 2624 ret = d->map.sections + section_index; 2625 assert(ret->mr); 2626 assert(ret->mr->ops); 2627 2628 return ret; 2629 } 2630 2631 static void io_mem_init(void) 2632 { 2633 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 2634 NULL, UINT64_MAX); 2635 } 2636 2637 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) 2638 { 2639 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 2640 uint16_t n; 2641 2642 n = dummy_section(&d->map, fv, &io_mem_unassigned); 2643 assert(n == PHYS_SECTION_UNASSIGNED); 2644 2645 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 2646 2647 return d; 2648 } 2649 2650 void address_space_dispatch_free(AddressSpaceDispatch *d) 2651 { 2652 phys_sections_free(&d->map); 2653 g_free(d); 2654 } 2655 2656 static void do_nothing(CPUState *cpu, run_on_cpu_data d) 2657 { 2658 } 2659 2660 static void tcg_log_global_after_sync(MemoryListener *listener) 2661 { 2662 CPUAddressSpace *cpuas; 2663 2664 /* Wait for the CPU to end the current TB. 
This avoids the following 2665 * incorrect race: 2666 * 2667 * vCPU migration 2668 * ---------------------- ------------------------- 2669 * TLB check -> slow path 2670 * notdirty_mem_write 2671 * write to RAM 2672 * mark dirty 2673 * clear dirty flag 2674 * TLB check -> fast path 2675 * read memory 2676 * write to RAM 2677 * 2678 * by pushing the migration thread's memory read after the vCPU thread has 2679 * written the memory. 2680 */ 2681 if (replay_mode == REPLAY_MODE_NONE) { 2682 /* 2683 * VGA can make calls to this function while updating the screen. 2684 * In record/replay mode this causes a deadlock, because 2685 * run_on_cpu waits for rr mutex. Therefore no races are possible 2686 * in this case and no need for making run_on_cpu when 2687 * record/replay is enabled. 2688 */ 2689 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2690 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); 2691 } 2692 } 2693 2694 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data) 2695 { 2696 CPUAddressSpace *cpuas = data.host_ptr; 2697 2698 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); 2699 tlb_flush(cpu); 2700 } 2701 2702 static void tcg_commit(MemoryListener *listener) 2703 { 2704 CPUAddressSpace *cpuas; 2705 CPUState *cpu; 2706 2707 assert(tcg_enabled()); 2708 /* since each CPU stores ram addresses in its TLB cache, we must 2709 reset the modified entries */ 2710 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2711 cpu = cpuas->cpu; 2712 2713 /* 2714 * Defer changes to as->memory_dispatch until the cpu is quiescent. 2715 * Otherwise we race between (1) other cpu threads and (2) ongoing 2716 * i/o for the current cpu thread, with data cached by mmu_lookup(). 2717 * 2718 * In addition, queueing the work function will kick the cpu back to 2719 * the main loop, which will end the RCU critical section and reclaim 2720 * the memory data structures. 2721 * 2722 * That said, the listener is also called during realize, before 2723 * all of the tcg machinery for run-on is initialized: thus halt_cond. 2724 */ 2725 if (cpu->halt_cond) { 2726 async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2727 } else { 2728 tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2729 } 2730 } 2731 2732 static void memory_map_init(void) 2733 { 2734 system_memory = g_malloc(sizeof(*system_memory)); 2735 2736 memory_region_init(system_memory, NULL, "system", UINT64_MAX); 2737 address_space_init(&address_space_memory, system_memory, "memory"); 2738 2739 system_io = g_malloc(sizeof(*system_io)); 2740 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 2741 65536); 2742 address_space_init(&address_space_io, system_io, "I/O"); 2743 } 2744 2745 MemoryRegion *get_system_memory(void) 2746 { 2747 return system_memory; 2748 } 2749 2750 MemoryRegion *get_system_io(void) 2751 { 2752 return system_io; 2753 } 2754 2755 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2756 hwaddr length) 2757 { 2758 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 2759 ram_addr_t ramaddr = memory_region_get_ram_addr(mr); 2760 2761 /* We know we're only called for RAM MemoryRegions */ 2762 assert(ramaddr != RAM_ADDR_INVALID); 2763 addr += ramaddr; 2764 2765 /* No early return if dirty_log_mask is or becomes 0, because 2766 * cpu_physical_memory_set_dirty_range will still call 2767 * xen_modified_memory. 
2768 */ 2769 if (dirty_log_mask) { 2770 dirty_log_mask = 2771 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2772 } 2773 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2774 assert(tcg_enabled()); 2775 tb_invalidate_phys_range(addr, addr + length - 1); 2776 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2777 } 2778 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 2779 } 2780 2781 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) 2782 { 2783 /* 2784 * In principle this function would work on other memory region types too, 2785 * but the ROM device use case is the only one where this operation is 2786 * necessary. Other memory regions should use the 2787 * address_space_read/write() APIs. 2788 */ 2789 assert(memory_region_is_romd(mr)); 2790 2791 invalidate_and_set_dirty(mr, addr, size); 2792 } 2793 2794 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2795 { 2796 unsigned access_size_max = mr->ops->valid.max_access_size; 2797 2798 /* Regions are assumed to support 1-4 byte accesses unless 2799 otherwise specified. */ 2800 if (access_size_max == 0) { 2801 access_size_max = 4; 2802 } 2803 2804 /* Bound the maximum access by the alignment of the address. */ 2805 if (!mr->ops->impl.unaligned) { 2806 unsigned align_size_max = addr & -addr; 2807 if (align_size_max != 0 && align_size_max < access_size_max) { 2808 access_size_max = align_size_max; 2809 } 2810 } 2811 2812 /* Don't attempt accesses larger than the maximum. */ 2813 if (l > access_size_max) { 2814 l = access_size_max; 2815 } 2816 l = pow2floor(l); 2817 2818 return l; 2819 } 2820 2821 bool prepare_mmio_access(MemoryRegion *mr) 2822 { 2823 bool release_lock = false; 2824 2825 if (!bql_locked()) { 2826 bql_lock(); 2827 release_lock = true; 2828 } 2829 if (mr->flush_coalesced_mmio) { 2830 qemu_flush_coalesced_mmio_buffer(); 2831 } 2832 2833 return release_lock; 2834 } 2835 2836 /** 2837 * flatview_access_allowed 2838 * @mr: #MemoryRegion to be accessed 2839 * @attrs: memory transaction attributes 2840 * @addr: address within that memory region 2841 * @len: the number of bytes to access 2842 * 2843 * Check if a memory transaction is allowed. 2844 * 2845 * Returns: true if transaction is allowed, false if denied. 2846 */ 2847 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, 2848 hwaddr addr, hwaddr len) 2849 { 2850 if (likely(!attrs.memory)) { 2851 return true; 2852 } 2853 if (memory_region_is_ram(mr)) { 2854 return true; 2855 } 2856 qemu_log_mask(LOG_INVALID_MEM, 2857 "Invalid access to non-RAM device at " 2858 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " 2859 "region '%s'\n", addr, len, memory_region_name(mr)); 2860 return false; 2861 } 2862 2863 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, 2864 const uint8_t *buf, 2865 hwaddr len, hwaddr mr_addr, 2866 hwaddr *l, MemoryRegion *mr) 2867 { 2868 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2869 return MEMTX_ACCESS_ERROR; 2870 } 2871 2872 if (!memory_access_is_direct(mr, true)) { 2873 uint64_t val; 2874 MemTxResult result; 2875 bool release_lock = prepare_mmio_access(mr); 2876 2877 *l = memory_access_size(mr, *l, mr_addr); 2878 /* 2879 * XXX: could force current_cpu to NULL to avoid 2880 * potential bugs 2881 */ 2882 2883 /* 2884 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2885 * the buffer by following ldn_he_p(). 
2886 */ 2887 #ifdef QEMU_STATIC_ANALYSIS 2888 assert((*l == 1 && len >= 1) || 2889 (*l == 2 && len >= 2) || 2890 (*l == 4 && len >= 4) || 2891 (*l == 8 && len >= 8)); 2892 #endif 2893 val = ldn_he_p(buf, *l); 2894 result = memory_region_dispatch_write(mr, mr_addr, val, 2895 size_memop(*l), attrs); 2896 if (release_lock) { 2897 bql_unlock(); 2898 } 2899 2900 return result; 2901 } else { 2902 /* RAM case */ 2903 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2904 false, true); 2905 2906 memmove(ram_ptr, buf, *l); 2907 invalidate_and_set_dirty(mr, mr_addr, *l); 2908 2909 return MEMTX_OK; 2910 } 2911 } 2912 2913 /* Called within RCU critical section. */ 2914 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2915 MemTxAttrs attrs, 2916 const void *ptr, 2917 hwaddr len, hwaddr mr_addr, 2918 hwaddr l, MemoryRegion *mr) 2919 { 2920 MemTxResult result = MEMTX_OK; 2921 const uint8_t *buf = ptr; 2922 2923 for (;;) { 2924 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 2925 mr); 2926 2927 len -= l; 2928 buf += l; 2929 addr += l; 2930 2931 if (!len) { 2932 break; 2933 } 2934 2935 l = len; 2936 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2937 } 2938 2939 return result; 2940 } 2941 2942 /* Called from RCU critical section. */ 2943 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2944 const void *buf, hwaddr len) 2945 { 2946 hwaddr l; 2947 hwaddr mr_addr; 2948 MemoryRegion *mr; 2949 2950 l = len; 2951 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2952 if (!flatview_access_allowed(mr, attrs, addr, len)) { 2953 return MEMTX_ACCESS_ERROR; 2954 } 2955 return flatview_write_continue(fv, addr, attrs, buf, len, 2956 mr_addr, l, mr); 2957 } 2958 2959 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, 2960 hwaddr len, hwaddr mr_addr, 2961 hwaddr *l, 2962 MemoryRegion *mr) 2963 { 2964 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2965 return MEMTX_ACCESS_ERROR; 2966 } 2967 2968 if (!memory_access_is_direct(mr, false)) { 2969 /* I/O case */ 2970 uint64_t val; 2971 MemTxResult result; 2972 bool release_lock = prepare_mmio_access(mr); 2973 2974 *l = memory_access_size(mr, *l, mr_addr); 2975 result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), 2976 attrs); 2977 2978 /* 2979 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2980 * the buffer by following stn_he_p(). 2981 */ 2982 #ifdef QEMU_STATIC_ANALYSIS 2983 assert((*l == 1 && len >= 1) || 2984 (*l == 2 && len >= 2) || 2985 (*l == 4 && len >= 4) || 2986 (*l == 8 && len >= 8)); 2987 #endif 2988 stn_he_p(buf, *l, val); 2989 2990 if (release_lock) { 2991 bql_unlock(); 2992 } 2993 return result; 2994 } else { 2995 /* RAM case */ 2996 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2997 false, false); 2998 2999 memcpy(buf, ram_ptr, *l); 3000 3001 return MEMTX_OK; 3002 } 3003 } 3004 3005 /* Called within RCU critical section. 
*/ 3006 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 3007 MemTxAttrs attrs, void *ptr, 3008 hwaddr len, hwaddr mr_addr, hwaddr l, 3009 MemoryRegion *mr) 3010 { 3011 MemTxResult result = MEMTX_OK; 3012 uint8_t *buf = ptr; 3013 3014 fuzz_dma_read_cb(addr, len, mr); 3015 for (;;) { 3016 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3017 3018 len -= l; 3019 buf += l; 3020 addr += l; 3021 3022 if (!len) { 3023 break; 3024 } 3025 3026 l = len; 3027 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3028 } 3029 3030 return result; 3031 } 3032 3033 /* Called from RCU critical section. */ 3034 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 3035 MemTxAttrs attrs, void *buf, hwaddr len) 3036 { 3037 hwaddr l; 3038 hwaddr mr_addr; 3039 MemoryRegion *mr; 3040 3041 l = len; 3042 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3043 if (!flatview_access_allowed(mr, attrs, addr, len)) { 3044 return MEMTX_ACCESS_ERROR; 3045 } 3046 return flatview_read_continue(fv, addr, attrs, buf, len, 3047 mr_addr, l, mr); 3048 } 3049 3050 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 3051 MemTxAttrs attrs, void *buf, hwaddr len) 3052 { 3053 MemTxResult result = MEMTX_OK; 3054 FlatView *fv; 3055 3056 if (len > 0) { 3057 RCU_READ_LOCK_GUARD(); 3058 fv = address_space_to_flatview(as); 3059 result = flatview_read(fv, addr, attrs, buf, len); 3060 } 3061 3062 return result; 3063 } 3064 3065 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 3066 MemTxAttrs attrs, 3067 const void *buf, hwaddr len) 3068 { 3069 MemTxResult result = MEMTX_OK; 3070 FlatView *fv; 3071 3072 if (len > 0) { 3073 RCU_READ_LOCK_GUARD(); 3074 fv = address_space_to_flatview(as); 3075 result = flatview_write(fv, addr, attrs, buf, len); 3076 } 3077 3078 return result; 3079 } 3080 3081 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 3082 void *buf, hwaddr len, bool is_write) 3083 { 3084 if (is_write) { 3085 return address_space_write(as, addr, attrs, buf, len); 3086 } else { 3087 return address_space_read_full(as, addr, attrs, buf, len); 3088 } 3089 } 3090 3091 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 3092 uint8_t c, hwaddr len, MemTxAttrs attrs) 3093 { 3094 #define FILLBUF_SIZE 512 3095 uint8_t fillbuf[FILLBUF_SIZE]; 3096 int l; 3097 MemTxResult error = MEMTX_OK; 3098 3099 memset(fillbuf, c, FILLBUF_SIZE); 3100 while (len > 0) { 3101 l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; 3102 error |= address_space_write(as, addr, attrs, fillbuf, l); 3103 len -= l; 3104 addr += l; 3105 } 3106 3107 return error; 3108 } 3109 3110 void cpu_physical_memory_rw(hwaddr addr, void *buf, 3111 hwaddr len, bool is_write) 3112 { 3113 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, 3114 buf, len, is_write); 3115 } 3116 3117 enum write_rom_type { 3118 WRITE_DATA, 3119 FLUSH_CACHE, 3120 }; 3121 3122 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, 3123 hwaddr addr, 3124 MemTxAttrs attrs, 3125 const void *ptr, 3126 hwaddr len, 3127 enum write_rom_type type) 3128 { 3129 hwaddr l; 3130 uint8_t *ram_ptr; 3131 hwaddr addr1; 3132 MemoryRegion *mr; 3133 const uint8_t *buf = ptr; 3134 3135 RCU_READ_LOCK_GUARD(); 3136 while (len > 0) { 3137 l = len; 3138 mr = address_space_translate(as, addr, &addr1, &l, true, attrs); 3139 3140 if (!(memory_region_is_ram(mr) || 3141 memory_region_is_romd(mr))) { 3142 l = memory_access_size(mr, l, addr1); 3143 } else { 3144 /* ROM/RAM case */ 3145 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 3146 switch (type) { 3147 case WRITE_DATA: 3148 memcpy(ram_ptr, buf, l); 3149 invalidate_and_set_dirty(mr, addr1, l); 3150 break; 3151 case FLUSH_CACHE: 3152 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); 3153 break; 3154 } 3155 } 3156 len -= l; 3157 buf += l; 3158 addr += l; 3159 } 3160 return MEMTX_OK; 3161 } 3162 3163 /* used for ROM loading : can write in RAM and ROM */ 3164 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 3165 MemTxAttrs attrs, 3166 const void *buf, hwaddr len) 3167 { 3168 return address_space_write_rom_internal(as, addr, attrs, 3169 buf, len, WRITE_DATA); 3170 } 3171 3172 void cpu_flush_icache_range(hwaddr start, hwaddr len) 3173 { 3174 /* 3175 * This function should do the same thing as an icache flush that was 3176 * triggered from within the guest. For TCG we are always cache coherent, 3177 * so there is no need to flush anything. For KVM / Xen we need to flush 3178 * the host's instruction cache at least. 3179 */ 3180 if (tcg_enabled()) { 3181 return; 3182 } 3183 3184 address_space_write_rom_internal(&address_space_memory, 3185 start, MEMTXATTRS_UNSPECIFIED, 3186 NULL, len, FLUSH_CACHE); 3187 } 3188 3189 /* 3190 * A magic value stored in the first 8 bytes of the bounce buffer struct. Used 3191 * to detect illegal pointers passed to address_space_unmap. 3192 */ 3193 #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed 3194 3195 typedef struct { 3196 uint64_t magic; 3197 MemoryRegion *mr; 3198 hwaddr addr; 3199 size_t len; 3200 uint8_t buffer[]; 3201 } BounceBuffer; 3202 3203 static void 3204 address_space_unregister_map_client_do(AddressSpaceMapClient *client) 3205 { 3206 QLIST_REMOVE(client, link); 3207 g_free(client); 3208 } 3209 3210 static void address_space_notify_map_clients_locked(AddressSpace *as) 3211 { 3212 AddressSpaceMapClient *client; 3213 3214 while (!QLIST_EMPTY(&as->map_client_list)) { 3215 client = QLIST_FIRST(&as->map_client_list); 3216 qemu_bh_schedule(client->bh); 3217 address_space_unregister_map_client_do(client); 3218 } 3219 } 3220 3221 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) 3222 { 3223 AddressSpaceMapClient *client = g_malloc(sizeof(*client)); 3224 3225 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3226 client->bh = bh; 3227 QLIST_INSERT_HEAD(&as->map_client_list, client, link); 3228 /* Write map_client_list before reading bounce_buffer_size. 
*/ 3229 smp_mb(); 3230 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { 3231 address_space_notify_map_clients_locked(as); 3232 } 3233 } 3234 3235 void cpu_exec_init_all(void) 3236 { 3237 qemu_mutex_init(&ram_list.mutex); 3238 /* The data structures we set up here depend on knowing the page size, 3239 * so no more changes can be made after this point. 3240 * In an ideal world, nothing we did before we had finished the 3241 * machine setup would care about the target page size, and we could 3242 * do this much later, rather than requiring board models to state 3243 * up front what their requirements are. 3244 */ 3245 finalize_target_page_bits(); 3246 io_mem_init(); 3247 memory_map_init(); 3248 } 3249 3250 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) 3251 { 3252 AddressSpaceMapClient *client; 3253 3254 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3255 QLIST_FOREACH(client, &as->map_client_list, link) { 3256 if (client->bh == bh) { 3257 address_space_unregister_map_client_do(client); 3258 break; 3259 } 3260 } 3261 } 3262 3263 static void address_space_notify_map_clients(AddressSpace *as) 3264 { 3265 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3266 address_space_notify_map_clients_locked(as); 3267 } 3268 3269 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3270 bool is_write, MemTxAttrs attrs) 3271 { 3272 MemoryRegion *mr; 3273 hwaddr l, xlat; 3274 3275 while (len > 0) { 3276 l = len; 3277 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3278 if (!memory_access_is_direct(mr, is_write)) { 3279 l = memory_access_size(mr, l, addr); 3280 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3281 return false; 3282 } 3283 } 3284 3285 len -= l; 3286 addr += l; 3287 } 3288 return true; 3289 } 3290 3291 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3292 hwaddr len, bool is_write, 3293 MemTxAttrs attrs) 3294 { 3295 FlatView *fv; 3296 3297 RCU_READ_LOCK_GUARD(); 3298 fv = address_space_to_flatview(as); 3299 return flatview_access_valid(fv, addr, len, is_write, attrs); 3300 } 3301 3302 static hwaddr 3303 flatview_extend_translation(FlatView *fv, hwaddr addr, 3304 hwaddr target_len, 3305 MemoryRegion *mr, hwaddr base, hwaddr len, 3306 bool is_write, MemTxAttrs attrs) 3307 { 3308 hwaddr done = 0; 3309 hwaddr xlat; 3310 MemoryRegion *this_mr; 3311 3312 for (;;) { 3313 target_len -= len; 3314 addr += len; 3315 done += len; 3316 if (target_len == 0) { 3317 return done; 3318 } 3319 3320 len = target_len; 3321 this_mr = flatview_translate(fv, addr, &xlat, 3322 &len, is_write, attrs); 3323 if (this_mr != mr || xlat != base + done) { 3324 return done; 3325 } 3326 } 3327 } 3328 3329 /* Map a physical memory region into a host virtual address. 3330 * May map a subset of the requested range, given by and returned in *plen. 3331 * May return NULL if resources needed to perform the mapping are exhausted. 3332 * Use only for reads OR writes - not for read-modify-write operations. 3333 * Use address_space_register_map_client() to know when retrying the map 3334 * operation is likely to succeed. 
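 *
 * A minimal usage sketch (hypothetical caller; as, gpa, data and size are
 * assumed to come from that caller):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         memcpy(p, data, plen);   // at most plen bytes are mapped
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 *
 * The returned length may be smaller than requested, and for MMIO regions
 * the pointer may be a bounce buffer, so callers must honour plen and
 * always pair a successful map with address_space_unmap().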
3335 */ 3336 void *address_space_map(AddressSpace *as, 3337 hwaddr addr, 3338 hwaddr *plen, 3339 bool is_write, 3340 MemTxAttrs attrs) 3341 { 3342 hwaddr len = *plen; 3343 hwaddr l, xlat; 3344 MemoryRegion *mr; 3345 FlatView *fv; 3346 3347 trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs); 3348 3349 if (len == 0) { 3350 return NULL; 3351 } 3352 3353 l = len; 3354 RCU_READ_LOCK_GUARD(); 3355 fv = address_space_to_flatview(as); 3356 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3357 3358 if (!memory_access_is_direct(mr, is_write)) { 3359 size_t used = qatomic_read(&as->bounce_buffer_size); 3360 for (;;) { 3361 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); 3362 size_t new_size = used + alloc; 3363 size_t actual = 3364 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); 3365 if (actual == used) { 3366 l = alloc; 3367 break; 3368 } 3369 used = actual; 3370 } 3371 3372 if (l == 0) { 3373 *plen = 0; 3374 return NULL; 3375 } 3376 3377 BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); 3378 bounce->magic = BOUNCE_BUFFER_MAGIC; 3379 memory_region_ref(mr); 3380 bounce->mr = mr; 3381 bounce->addr = addr; 3382 bounce->len = l; 3383 3384 if (!is_write) { 3385 flatview_read(fv, addr, attrs, 3386 bounce->buffer, l); 3387 } 3388 3389 *plen = l; 3390 return bounce->buffer; 3391 } 3392 3393 memory_region_ref(mr); 3394 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3395 l, is_write, attrs); 3396 fuzz_dma_read_cb(addr, *plen, mr); 3397 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); 3398 } 3399 3400 /* Unmaps a memory region previously mapped by address_space_map(). 3401 * Will also mark the memory as dirty if is_write is true. access_len gives 3402 * the amount of memory that was actually read or written by the caller. 3403 */ 3404 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3405 bool is_write, hwaddr access_len) 3406 { 3407 MemoryRegion *mr; 3408 ram_addr_t addr1; 3409 3410 mr = memory_region_from_host(buffer, &addr1); 3411 if (mr != NULL) { 3412 if (is_write) { 3413 invalidate_and_set_dirty(mr, addr1, access_len); 3414 } 3415 if (xen_enabled()) { 3416 xen_invalidate_map_cache_entry(buffer); 3417 } 3418 memory_region_unref(mr); 3419 return; 3420 } 3421 3422 3423 BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); 3424 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); 3425 3426 if (is_write) { 3427 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, 3428 bounce->buffer, access_len); 3429 } 3430 3431 qatomic_sub(&as->bounce_buffer_size, bounce->len); 3432 bounce->magic = ~BOUNCE_BUFFER_MAGIC; 3433 memory_region_unref(bounce->mr); 3434 g_free(bounce); 3435 /* Write bounce_buffer_size before reading map_client_list. */ 3436 smp_mb(); 3437 address_space_notify_map_clients(as); 3438 } 3439 3440 void *cpu_physical_memory_map(hwaddr addr, 3441 hwaddr *plen, 3442 bool is_write) 3443 { 3444 return address_space_map(&address_space_memory, addr, plen, is_write, 3445 MEMTXATTRS_UNSPECIFIED); 3446 } 3447 3448 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3449 bool is_write, hwaddr access_len) 3450 { 3451 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3452 } 3453 3454 #define ARG1_DECL AddressSpace *as 3455 #define ARG1 as 3456 #define SUFFIX 3457 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3458 #define RCU_READ_LOCK(...) rcu_read_lock() 3459 #define RCU_READ_UNLOCK(...) 
rcu_read_unlock() 3460 #include "memory_ldst.c.inc" 3461 3462 int64_t address_space_cache_init(MemoryRegionCache *cache, 3463 AddressSpace *as, 3464 hwaddr addr, 3465 hwaddr len, 3466 bool is_write) 3467 { 3468 AddressSpaceDispatch *d; 3469 hwaddr l; 3470 MemoryRegion *mr; 3471 Int128 diff; 3472 3473 assert(len > 0); 3474 3475 l = len; 3476 cache->fv = address_space_get_flatview(as); 3477 d = flatview_to_dispatch(cache->fv); 3478 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3479 3480 /* 3481 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3482 * Take that into account to compute how many bytes are there between 3483 * cache->xlat and the end of the section. 3484 */ 3485 diff = int128_sub(cache->mrs.size, 3486 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3487 l = int128_get64(int128_min(diff, int128_make64(l))); 3488 3489 mr = cache->mrs.mr; 3490 memory_region_ref(mr); 3491 if (memory_access_is_direct(mr, is_write)) { 3492 /* We don't care about the memory attributes here as we're only 3493 * doing this if we found actual RAM, which behaves the same 3494 * regardless of attributes; so UNSPECIFIED is fine. 3495 */ 3496 l = flatview_extend_translation(cache->fv, addr, len, mr, 3497 cache->xlat, l, is_write, 3498 MEMTXATTRS_UNSPECIFIED); 3499 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, 3500 is_write); 3501 } else { 3502 cache->ptr = NULL; 3503 } 3504 3505 cache->len = l; 3506 cache->is_write = is_write; 3507 return l; 3508 } 3509 3510 void address_space_cache_invalidate(MemoryRegionCache *cache, 3511 hwaddr addr, 3512 hwaddr access_len) 3513 { 3514 assert(cache->is_write); 3515 if (likely(cache->ptr)) { 3516 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3517 } 3518 } 3519 3520 void address_space_cache_destroy(MemoryRegionCache *cache) 3521 { 3522 if (!cache->mrs.mr) { 3523 return; 3524 } 3525 3526 if (xen_enabled()) { 3527 xen_invalidate_map_cache_entry(cache->ptr); 3528 } 3529 memory_region_unref(cache->mrs.mr); 3530 flatview_unref(cache->fv); 3531 cache->mrs.mr = NULL; 3532 cache->fv = NULL; 3533 } 3534 3535 /* Called from RCU critical section. This function has the same 3536 * semantics as address_space_translate, but it only works on a 3537 * predefined range of a MemoryRegion that was mapped with 3538 * address_space_cache_init. 3539 */ 3540 static inline MemoryRegion *address_space_translate_cached( 3541 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3542 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3543 { 3544 MemoryRegionSection section; 3545 MemoryRegion *mr; 3546 IOMMUMemoryRegion *iommu_mr; 3547 AddressSpace *target_as; 3548 3549 assert(!cache->ptr); 3550 *xlat = addr + cache->xlat; 3551 3552 mr = cache->mrs.mr; 3553 iommu_mr = memory_region_get_iommu(mr); 3554 if (!iommu_mr) { 3555 /* MMIO region. */ 3556 return mr; 3557 } 3558 3559 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3560 NULL, is_write, true, 3561 &target_as, attrs); 3562 return section.mr; 3563 } 3564 3565 /* Called within RCU critical section. 
*/ 3566 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, 3567 const void *ptr, 3568 hwaddr len, 3569 hwaddr mr_addr, 3570 hwaddr l, 3571 MemoryRegion *mr) 3572 { 3573 MemTxResult result = MEMTX_OK; 3574 const uint8_t *buf = ptr; 3575 3576 for (;;) { 3577 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3578 mr); 3579 3580 len -= l; 3581 buf += l; 3582 mr_addr += l; 3583 3584 if (!len) { 3585 break; 3586 } 3587 3588 l = len; 3589 } 3590 3591 return result; 3592 } 3593 3594 /* Called within RCU critical section. */ 3595 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, 3596 void *ptr, hwaddr len, 3597 hwaddr mr_addr, hwaddr l, 3598 MemoryRegion *mr) 3599 { 3600 MemTxResult result = MEMTX_OK; 3601 uint8_t *buf = ptr; 3602 3603 for (;;) { 3604 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3605 len -= l; 3606 buf += l; 3607 mr_addr += l; 3608 3609 if (!len) { 3610 break; 3611 } 3612 l = len; 3613 } 3614 3615 return result; 3616 } 3617 3618 /* Called from RCU critical section. address_space_read_cached uses this 3619 * out of line function when the target is an MMIO or IOMMU region. 3620 */ 3621 MemTxResult 3622 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3623 void *buf, hwaddr len) 3624 { 3625 hwaddr mr_addr, l; 3626 MemoryRegion *mr; 3627 3628 l = len; 3629 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, 3630 MEMTXATTRS_UNSPECIFIED); 3631 return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, 3632 buf, len, mr_addr, l, mr); 3633 } 3634 3635 /* Called from RCU critical section. address_space_write_cached uses this 3636 * out of line function when the target is an MMIO or IOMMU region. 3637 */ 3638 MemTxResult 3639 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3640 const void *buf, hwaddr len) 3641 { 3642 hwaddr mr_addr, l; 3643 MemoryRegion *mr; 3644 3645 l = len; 3646 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, 3647 MEMTXATTRS_UNSPECIFIED); 3648 return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, 3649 buf, len, mr_addr, l, mr); 3650 } 3651 3652 #define ARG1_DECL MemoryRegionCache *cache 3653 #define ARG1 cache 3654 #define SUFFIX _cached_slow 3655 #define TRANSLATE(...) 
address_space_translate_cached(cache, __VA_ARGS__)
#define RCU_READ_LOCK()      ((void)0)
#define RCU_READ_UNLOCK()    ((void)0)
#include "memory_ldst.c.inc"

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write)
{
    hwaddr phys_addr;
    vaddr l, page;
    uint8_t *buf = ptr;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;
        MemTxResult res;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
                                          attrs, buf, l);
        } else {
            res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
                                     attrs, buf, l);
        }
        if (res != MEMTX_OK) {
            return -1;
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false,
                                 MEMTXATTRS_UNSPECIFIED);

    return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-zero on failure.
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
        error_report("%s: Unaligned start address: %p",
                     __func__, host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->max_length) {
        bool need_madvise, need_fallocate;
        if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
            error_report("%s: Unaligned length: %zx", __func__, length);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        /* The logic here is messy;
         *    madvise DONTNEED fails for hugepages
         *    fallocate works on hugepages and shmem
         *    shared anonymous memory requires madvise REMOVE
         */
        need_madvise = (rb->page_size == qemu_real_host_page_size());
        need_fallocate = rb->fd != -1;
        if (need_fallocate) {
            /* For a file, this causes the area of the file to be zero'd
             * if read, and for hugetlbfs also causes it to be unmapped
             * so a userfault will trigger.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            /*
             * fallocate() will fail with readonly files. Let's print a
             * proper error message.
             */
            if (rb->flags & RAM_READONLY_FD) {
                error_report("%s: Discarding RAM with readonly files is not"
                             " supported", __func__);
                goto err;
            }
            /*
             * We'll discard data from the actual file, even though we only
             * have a MAP_PRIVATE mapping, possibly messing with other
             * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
             * change that behavior without violating the promised
             * semantics of ram_block_discard_range().
             *
             * Only warn, because it works as long as nobody else uses that
             * file.
             */
            if (!qemu_ram_is_shared(rb)) {
                warn_report_once("%s: Discarding RAM"
                                 " in private file mappings is possibly"
                                 " dangerous, because it will modify the"
                                 " underlying file and will affect other"
                                 " users of the file", __func__);
            }

            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
            if (ret) {
                ret = -errno;
                error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
                             __func__, rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("%s: fallocate not available/file"
                         "%s:%" PRIx64 " +%zx (%d)",
                         __func__, rb->idstr, start, length, ret);
            goto err;
#endif
        }
        if (need_madvise) {
            /* For normal RAM this causes it to be unmapped,
             * for shared memory it causes the local mapping to disappear
             * and to fall back on the file contents (which we just
             * fallocate'd away).
             */
#if defined(CONFIG_MADVISE)
            if (qemu_ram_is_shared(rb) && rb->fd < 0) {
                ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE);
            } else {
                ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED);
            }
            if (ret) {
                ret = -errno;
                error_report("%s: Failed to discard range "
                             "%s:%" PRIx64 " +%zx (%d)",
                             __func__, rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
                         __func__, rb->idstr, start, length, ret);
            goto err;
#endif
        }
        trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
                                      need_madvise, need_fallocate, ret);
    } else {
        error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
                     __func__, rb->idstr, start, length, rb->max_length);
    }

err:
    return ret;
}

int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
                                        size_t length)
{
    int ret = -1;

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                    start, length);

    if (ret) {
        ret = -errno;
        error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
                     __func__, rb->idstr, start, length, ret);
    }
#else
    ret = -ENOSYS;
    error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
                 __func__, rb->idstr, start, length, ret);
#endif

    return ret;
}

bool ramblock_is_pmem(RAMBlock *rb)
{
    return rb->flags & RAM_PMEM;
}

static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        qemu_printf("\t%3d ", start);
    } else {
        qemu_printf("\t%3d..%-3d ", start, end - 1);
    }
    qemu_printf(" skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        qemu_printf(" ptr=NIL");
    } else if (!skip) {
        qemu_printf(" 
ptr=#%d", ptr); 3892 } else { 3893 qemu_printf(" ptr=[%d]", ptr); 3894 } 3895 qemu_printf("\n"); 3896 } 3897 3898 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3899 int128_sub((size), int128_one())) : 0) 3900 3901 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) 3902 { 3903 int i; 3904 3905 qemu_printf(" Dispatch\n"); 3906 qemu_printf(" Physical sections\n"); 3907 3908 for (i = 0; i < d->map.sections_nb; ++i) { 3909 MemoryRegionSection *s = d->map.sections + i; 3910 const char *names[] = { " [unassigned]", " [not dirty]", 3911 " [ROM]", " [watch]" }; 3912 3913 qemu_printf(" #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx 3914 " %s%s%s%s%s", 3915 i, 3916 s->offset_within_address_space, 3917 s->offset_within_address_space + MR_SIZE(s->size), 3918 s->mr->name ? s->mr->name : "(noname)", 3919 i < ARRAY_SIZE(names) ? names[i] : "", 3920 s->mr == root ? " [ROOT]" : "", 3921 s == d->mru_section ? " [MRU]" : "", 3922 s->mr->is_iommu ? " [iommu]" : ""); 3923 3924 if (s->mr->alias) { 3925 qemu_printf(" alias=%s", s->mr->alias->name ? 3926 s->mr->alias->name : "noname"); 3927 } 3928 qemu_printf("\n"); 3929 } 3930 3931 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", 3932 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); 3933 for (i = 0; i < d->map.nodes_nb; ++i) { 3934 int j, jprev; 3935 PhysPageEntry prev; 3936 Node *n = d->map.nodes + i; 3937 3938 qemu_printf(" [%d]\n", i); 3939 3940 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { 3941 PhysPageEntry *pe = *n + j; 3942 3943 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { 3944 continue; 3945 } 3946 3947 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3948 3949 jprev = j; 3950 prev = *pe; 3951 } 3952 3953 if (jprev != ARRAY_SIZE(*n)) { 3954 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3955 } 3956 } 3957 } 3958 3959 /* Require any discards to work. */ 3960 static unsigned int ram_block_discard_required_cnt; 3961 /* Require only coordinated discards to work. */ 3962 static unsigned int ram_block_coordinated_discard_required_cnt; 3963 /* Disable any discards. */ 3964 static unsigned int ram_block_discard_disabled_cnt; 3965 /* Disable only uncoordinated discards. 
*/ 3966 static unsigned int ram_block_uncoordinated_discard_disabled_cnt; 3967 static QemuMutex ram_block_discard_disable_mutex; 3968 3969 static void ram_block_discard_disable_mutex_lock(void) 3970 { 3971 static gsize initialized; 3972 3973 if (g_once_init_enter(&initialized)) { 3974 qemu_mutex_init(&ram_block_discard_disable_mutex); 3975 g_once_init_leave(&initialized, 1); 3976 } 3977 qemu_mutex_lock(&ram_block_discard_disable_mutex); 3978 } 3979 3980 static void ram_block_discard_disable_mutex_unlock(void) 3981 { 3982 qemu_mutex_unlock(&ram_block_discard_disable_mutex); 3983 } 3984 3985 int ram_block_discard_disable(bool state) 3986 { 3987 int ret = 0; 3988 3989 ram_block_discard_disable_mutex_lock(); 3990 if (!state) { 3991 ram_block_discard_disabled_cnt--; 3992 } else if (ram_block_discard_required_cnt || 3993 ram_block_coordinated_discard_required_cnt) { 3994 ret = -EBUSY; 3995 } else { 3996 ram_block_discard_disabled_cnt++; 3997 } 3998 ram_block_discard_disable_mutex_unlock(); 3999 return ret; 4000 } 4001 4002 int ram_block_uncoordinated_discard_disable(bool state) 4003 { 4004 int ret = 0; 4005 4006 ram_block_discard_disable_mutex_lock(); 4007 if (!state) { 4008 ram_block_uncoordinated_discard_disabled_cnt--; 4009 } else if (ram_block_discard_required_cnt) { 4010 ret = -EBUSY; 4011 } else { 4012 ram_block_uncoordinated_discard_disabled_cnt++; 4013 } 4014 ram_block_discard_disable_mutex_unlock(); 4015 return ret; 4016 } 4017 4018 int ram_block_discard_require(bool state) 4019 { 4020 int ret = 0; 4021 4022 ram_block_discard_disable_mutex_lock(); 4023 if (!state) { 4024 ram_block_discard_required_cnt--; 4025 } else if (ram_block_discard_disabled_cnt || 4026 ram_block_uncoordinated_discard_disabled_cnt) { 4027 ret = -EBUSY; 4028 } else { 4029 ram_block_discard_required_cnt++; 4030 } 4031 ram_block_discard_disable_mutex_unlock(); 4032 return ret; 4033 } 4034 4035 int ram_block_coordinated_discard_require(bool state) 4036 { 4037 int ret = 0; 4038 4039 ram_block_discard_disable_mutex_lock(); 4040 if (!state) { 4041 ram_block_coordinated_discard_required_cnt--; 4042 } else if (ram_block_discard_disabled_cnt) { 4043 ret = -EBUSY; 4044 } else { 4045 ram_block_coordinated_discard_required_cnt++; 4046 } 4047 ram_block_discard_disable_mutex_unlock(); 4048 return ret; 4049 } 4050 4051 bool ram_block_discard_is_disabled(void) 4052 { 4053 return qatomic_read(&ram_block_discard_disabled_cnt) || 4054 qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt); 4055 } 4056 4057 bool ram_block_discard_is_required(void) 4058 { 4059 return qatomic_read(&ram_block_discard_required_cnt) || 4060 qatomic_read(&ram_block_coordinated_discard_required_cnt); 4061 } 4062
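
/*
 * A hedged usage sketch for the discard arbitration helpers above (the
 * function names my_pinning_dev_realize/unrealize are hypothetical and not
 * part of QEMU): code that pins guest RAM long-term, and therefore cannot
 * tolerate discards, would typically bracket its lifetime like this:
 *
 *     static int my_pinning_dev_realize(Error **errp)
 *     {
 *         if (ram_block_discard_disable(true)) {
 *             error_setg(errp, "RAM discard is required by another user");
 *             return -EBUSY;
 *         }
 *         // ... pin guest memory ...
 *         return 0;
 *     }
 *
 *     static void my_pinning_dev_unrealize(void)
 *     {
 *         // ... unpin guest memory ...
 *         ram_block_discard_disable(false);
 *     }
 */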