/*
 * RAM allocation and memory access
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */

#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/hostmem.h"
#include "sysemu/hw_accel.h"
#include "sysemu/xen-mapcache.h"
#include "trace/trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/pmem.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.
*/ 107 #define ADDR_SPACE_BITS 64 108 109 #define P_L2_BITS 9 110 #define P_L2_SIZE (1 << P_L2_BITS) 111 112 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) 113 114 typedef PhysPageEntry Node[P_L2_SIZE]; 115 116 typedef struct PhysPageMap { 117 struct rcu_head rcu; 118 119 unsigned sections_nb; 120 unsigned sections_nb_alloc; 121 unsigned nodes_nb; 122 unsigned nodes_nb_alloc; 123 Node *nodes; 124 MemoryRegionSection *sections; 125 } PhysPageMap; 126 127 struct AddressSpaceDispatch { 128 MemoryRegionSection *mru_section; 129 /* This is a multi-level map on the physical address space. 130 * The bottom level has pointers to MemoryRegionSections. 131 */ 132 PhysPageEntry phys_map; 133 PhysPageMap map; 134 }; 135 136 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 137 typedef struct subpage_t { 138 MemoryRegion iomem; 139 FlatView *fv; 140 hwaddr base; 141 uint16_t sub_section[]; 142 } subpage_t; 143 144 #define PHYS_SECTION_UNASSIGNED 0 145 146 static void io_mem_init(void); 147 static void memory_map_init(void); 148 static void tcg_log_global_after_sync(MemoryListener *listener); 149 static void tcg_commit(MemoryListener *listener); 150 151 /** 152 * CPUAddressSpace: all the information a CPU needs about an AddressSpace 153 * @cpu: the CPU whose AddressSpace this is 154 * @as: the AddressSpace itself 155 * @memory_dispatch: its dispatch pointer (cached, RCU protected) 156 * @tcg_as_listener: listener for tracking changes to the AddressSpace 157 */ 158 struct CPUAddressSpace { 159 CPUState *cpu; 160 AddressSpace *as; 161 struct AddressSpaceDispatch *memory_dispatch; 162 MemoryListener tcg_as_listener; 163 }; 164 165 struct DirtyBitmapSnapshot { 166 ram_addr_t start; 167 ram_addr_t end; 168 unsigned long dirty[]; 169 }; 170 171 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) 172 { 173 static unsigned alloc_hint = 16; 174 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { 175 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); 176 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); 177 alloc_hint = map->nodes_nb_alloc; 178 } 179 } 180 181 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) 182 { 183 unsigned i; 184 uint32_t ret; 185 PhysPageEntry e; 186 PhysPageEntry *p; 187 188 ret = map->nodes_nb++; 189 p = map->nodes[ret]; 190 assert(ret != PHYS_MAP_NODE_NIL); 191 assert(ret != map->nodes_nb_alloc); 192 193 e.skip = leaf ? 0 : 1; 194 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; 195 for (i = 0; i < P_L2_SIZE; ++i) { 196 memcpy(&p[i], &e, sizeof(e)); 197 } 198 return ret; 199 } 200 201 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, 202 hwaddr *index, uint64_t *nb, uint16_t leaf, 203 int level) 204 { 205 PhysPageEntry *p; 206 hwaddr step = (hwaddr)1 << (level * P_L2_BITS); 207 208 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { 209 lp->ptr = phys_map_node_alloc(map, level == 0); 210 } 211 p = map->nodes[lp->ptr]; 212 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; 213 214 while (*nb && lp < &p[P_L2_SIZE]) { 215 if ((*index & (step - 1)) == 0 && *nb >= step) { 216 lp->skip = 0; 217 lp->ptr = leaf; 218 *index += step; 219 *nb -= step; 220 } else { 221 phys_page_set_level(map, lp, index, nb, leaf, level - 1); 222 } 223 ++lp; 224 } 225 } 226 227 static void phys_page_set(AddressSpaceDispatch *d, 228 hwaddr index, uint64_t nb, 229 uint16_t leaf) 230 { 231 /* Wildly overreserve - it doesn't matter much. 
*/ 232 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); 233 234 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); 235 } 236 237 /* Compact a non leaf page entry. Simply detect that the entry has a single child, 238 * and update our entry so we can skip it and go directly to the destination. 239 */ 240 static void phys_page_compact(PhysPageEntry *lp, Node *nodes) 241 { 242 unsigned valid_ptr = P_L2_SIZE; 243 int valid = 0; 244 PhysPageEntry *p; 245 int i; 246 247 if (lp->ptr == PHYS_MAP_NODE_NIL) { 248 return; 249 } 250 251 p = nodes[lp->ptr]; 252 for (i = 0; i < P_L2_SIZE; i++) { 253 if (p[i].ptr == PHYS_MAP_NODE_NIL) { 254 continue; 255 } 256 257 valid_ptr = i; 258 valid++; 259 if (p[i].skip) { 260 phys_page_compact(&p[i], nodes); 261 } 262 } 263 264 /* We can only compress if there's only one child. */ 265 if (valid != 1) { 266 return; 267 } 268 269 assert(valid_ptr < P_L2_SIZE); 270 271 /* Don't compress if it won't fit in the # of bits we have. */ 272 if (P_L2_LEVELS >= (1 << 6) && 273 lp->skip + p[valid_ptr].skip >= (1 << 6)) { 274 return; 275 } 276 277 lp->ptr = p[valid_ptr].ptr; 278 if (!p[valid_ptr].skip) { 279 /* If our only child is a leaf, make this a leaf. */ 280 /* By design, we should have made this node a leaf to begin with so we 281 * should never reach here. 282 * But since it's so simple to handle this, let's do it just in case we 283 * change this rule. 284 */ 285 lp->skip = 0; 286 } else { 287 lp->skip += p[valid_ptr].skip; 288 } 289 } 290 291 void address_space_dispatch_compact(AddressSpaceDispatch *d) 292 { 293 if (d->phys_map.skip) { 294 phys_page_compact(&d->phys_map, d->map.nodes); 295 } 296 } 297 298 static inline bool section_covers_addr(const MemoryRegionSection *section, 299 hwaddr addr) 300 { 301 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means 302 * the section must cover the entire address space. 
 */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address.
This 399 * should only be meaningful for IOMMU translated 400 * addresses, since there may be huge pages that this bit 401 * would tell. It can be %NULL if we don't care about it. 402 * @is_write: whether the translation operation is for write 403 * @is_mmio: whether this can be MMIO, set true if it can 404 * @target_as: the address space targeted by the IOMMU 405 * @attrs: transaction attributes 406 * 407 * This function is called from RCU critical section. It is the common 408 * part of flatview_do_translate and address_space_translate_cached. 409 */ 410 static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, 411 hwaddr *xlat, 412 hwaddr *plen_out, 413 hwaddr *page_mask_out, 414 bool is_write, 415 bool is_mmio, 416 AddressSpace **target_as, 417 MemTxAttrs attrs) 418 { 419 MemoryRegionSection *section; 420 hwaddr page_mask = (hwaddr)-1; 421 422 do { 423 hwaddr addr = *xlat; 424 IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 425 int iommu_idx = 0; 426 IOMMUTLBEntry iotlb; 427 428 if (imrc->attrs_to_index) { 429 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 430 } 431 432 iotlb = imrc->translate(iommu_mr, addr, is_write ? 433 IOMMU_WO : IOMMU_RO, iommu_idx); 434 435 if (!(iotlb.perm & (1 << is_write))) { 436 goto unassigned; 437 } 438 439 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 440 | (addr & iotlb.addr_mask)); 441 page_mask &= iotlb.addr_mask; 442 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); 443 *target_as = iotlb.target_as; 444 445 section = address_space_translate_internal( 446 address_space_to_dispatch(iotlb.target_as), addr, xlat, 447 plen_out, is_mmio); 448 449 iommu_mr = memory_region_get_iommu(section->mr); 450 } while (unlikely(iommu_mr)); 451 452 if (page_mask_out) { 453 *page_mask_out = page_mask; 454 } 455 return *section; 456 457 unassigned: 458 return (MemoryRegionSection) { .mr = &io_mem_unassigned }; 459 } 460 461 /** 462 * flatview_do_translate - translate an address in FlatView 463 * 464 * @fv: the flat view that we want to translate on 465 * @addr: the address to be translated in above address space 466 * @xlat: the translated address offset within memory region. It 467 * cannot be @NULL. 468 * @plen_out: valid read/write length of the translated address. It 469 * can be @NULL when we don't care about it. 470 * @page_mask_out: page mask for the translated address. This 471 * should only be meaningful for IOMMU translated 472 * addresses, since there may be huge pages that this bit 473 * would tell. It can be @NULL if we don't care about it. 
474 * @is_write: whether the translation operation is for write 475 * @is_mmio: whether this can be MMIO, set true if it can 476 * @target_as: the address space targeted by the IOMMU 477 * @attrs: memory transaction attributes 478 * 479 * This function is called from RCU critical section 480 */ 481 static MemoryRegionSection flatview_do_translate(FlatView *fv, 482 hwaddr addr, 483 hwaddr *xlat, 484 hwaddr *plen_out, 485 hwaddr *page_mask_out, 486 bool is_write, 487 bool is_mmio, 488 AddressSpace **target_as, 489 MemTxAttrs attrs) 490 { 491 MemoryRegionSection *section; 492 IOMMUMemoryRegion *iommu_mr; 493 hwaddr plen = (hwaddr)(-1); 494 495 if (!plen_out) { 496 plen_out = &plen; 497 } 498 499 section = address_space_translate_internal( 500 flatview_to_dispatch(fv), addr, xlat, 501 plen_out, is_mmio); 502 503 iommu_mr = memory_region_get_iommu(section->mr); 504 if (unlikely(iommu_mr)) { 505 return address_space_translate_iommu(iommu_mr, xlat, 506 plen_out, page_mask_out, 507 is_write, is_mmio, 508 target_as, attrs); 509 } 510 if (page_mask_out) { 511 /* Not behind an IOMMU, use default page size. */ 512 *page_mask_out = ~TARGET_PAGE_MASK; 513 } 514 515 return *section; 516 } 517 518 /* Called from RCU critical section */ 519 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, 520 bool is_write, MemTxAttrs attrs) 521 { 522 MemoryRegionSection section; 523 hwaddr xlat, page_mask; 524 525 /* 526 * This can never be MMIO, and we don't really care about plen, 527 * but page mask. 528 */ 529 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat, 530 NULL, &page_mask, is_write, false, &as, 531 attrs); 532 533 /* Illegal translation */ 534 if (section.mr == &io_mem_unassigned) { 535 goto iotlb_fail; 536 } 537 538 /* Convert memory region offset into address space offset */ 539 xlat += section.offset_within_address_space - 540 section.offset_within_region; 541 542 return (IOMMUTLBEntry) { 543 .target_as = as, 544 .iova = addr & ~page_mask, 545 .translated_addr = xlat & ~page_mask, 546 .addr_mask = page_mask, 547 /* IOTLBs are for DMAs, and DMA only allows on RAMs. */ 548 .perm = IOMMU_RW, 549 }; 550 551 iotlb_fail: 552 return (IOMMUTLBEntry) {0}; 553 } 554 555 /* Called from RCU critical section */ 556 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, 557 hwaddr *plen, bool is_write, 558 MemTxAttrs attrs) 559 { 560 MemoryRegion *mr; 561 MemoryRegionSection section; 562 AddressSpace *as = NULL; 563 564 /* This can be MMIO, so setup MMIO bit. */ 565 section = flatview_do_translate(fv, addr, xlat, plen, NULL, 566 is_write, true, &as, attrs); 567 mr = section.mr; 568 569 if (xen_enabled() && memory_access_is_direct(mr, is_write)) { 570 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; 571 *plen = MIN(page, *plen); 572 } 573 574 return mr; 575 } 576 577 typedef struct TCGIOMMUNotifier { 578 IOMMUNotifier n; 579 MemoryRegion *mr; 580 CPUState *cpu; 581 int iommu_idx; 582 bool active; 583 } TCGIOMMUNotifier; 584 585 static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 586 { 587 TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n); 588 589 if (!notifier->active) { 590 return; 591 } 592 tlb_flush(notifier->cpu); 593 notifier->active = false; 594 /* We leave the notifier struct on the list to avoid reallocating it later. 595 * Generally the number of IOMMUs a CPU deals with will be small. 596 * In any case we can't unregister the iommu notifier from a notify 597 * callback. 
598 */ 599 } 600 601 static void tcg_register_iommu_notifier(CPUState *cpu, 602 IOMMUMemoryRegion *iommu_mr, 603 int iommu_idx) 604 { 605 /* Make sure this CPU has an IOMMU notifier registered for this 606 * IOMMU/IOMMU index combination, so that we can flush its TLB 607 * when the IOMMU tells us the mappings we've cached have changed. 608 */ 609 MemoryRegion *mr = MEMORY_REGION(iommu_mr); 610 TCGIOMMUNotifier *notifier = NULL; 611 int i; 612 613 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 614 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 615 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { 616 break; 617 } 618 } 619 if (i == cpu->iommu_notifiers->len) { 620 /* Not found, add a new entry at the end of the array */ 621 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); 622 notifier = g_new0(TCGIOMMUNotifier, 1); 623 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; 624 625 notifier->mr = mr; 626 notifier->iommu_idx = iommu_idx; 627 notifier->cpu = cpu; 628 /* Rather than trying to register interest in the specific part 629 * of the iommu's address space that we've accessed and then 630 * expand it later as subsequent accesses touch more of it, we 631 * just register interest in the whole thing, on the assumption 632 * that iommu reconfiguration will be rare. 633 */ 634 iommu_notifier_init(¬ifier->n, 635 tcg_iommu_unmap_notify, 636 IOMMU_NOTIFIER_UNMAP, 637 0, 638 HWADDR_MAX, 639 iommu_idx); 640 memory_region_register_iommu_notifier(notifier->mr, ¬ifier->n, 641 &error_fatal); 642 } 643 644 if (!notifier->active) { 645 notifier->active = true; 646 } 647 } 648 649 void tcg_iommu_free_notifier_list(CPUState *cpu) 650 { 651 /* Destroy the CPU's notifier list */ 652 int i; 653 TCGIOMMUNotifier *notifier; 654 655 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 656 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 657 memory_region_unregister_iommu_notifier(notifier->mr, ¬ifier->n); 658 g_free(notifier); 659 } 660 g_array_free(cpu->iommu_notifiers, true); 661 } 662 663 void tcg_iommu_init_notifier_list(CPUState *cpu) 664 { 665 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); 666 } 667 668 /* Called from RCU critical section */ 669 MemoryRegionSection * 670 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, 671 hwaddr *xlat, hwaddr *plen, 672 MemTxAttrs attrs, int *prot) 673 { 674 MemoryRegionSection *section; 675 IOMMUMemoryRegion *iommu_mr; 676 IOMMUMemoryRegionClass *imrc; 677 IOMMUTLBEntry iotlb; 678 int iommu_idx; 679 AddressSpaceDispatch *d = 680 qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); 681 682 for (;;) { 683 section = address_space_translate_internal(d, addr, &addr, plen, false); 684 685 iommu_mr = memory_region_get_iommu(section->mr); 686 if (!iommu_mr) { 687 break; 688 } 689 690 imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 691 692 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 693 tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); 694 /* We need all the permissions, so pass IOMMU_NONE so the IOMMU 695 * doesn't short-cut its translation table walk. 696 */ 697 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); 698 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 699 | (addr & iotlb.addr_mask)); 700 /* Update the caller's prot bits to remove permissions the IOMMU 701 * is giving us a failure response for. If we get down to no 702 * permissions left at all we can give up now. 
703 */ 704 if (!(iotlb.perm & IOMMU_RO)) { 705 *prot &= ~(PAGE_READ | PAGE_EXEC); 706 } 707 if (!(iotlb.perm & IOMMU_WO)) { 708 *prot &= ~PAGE_WRITE; 709 } 710 711 if (!*prot) { 712 goto translate_fail; 713 } 714 715 d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as)); 716 } 717 718 assert(!memory_region_is_iommu(section->mr)); 719 *xlat = addr; 720 return section; 721 722 translate_fail: 723 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; 724 } 725 726 void cpu_address_space_init(CPUState *cpu, int asidx, 727 const char *prefix, MemoryRegion *mr) 728 { 729 CPUAddressSpace *newas; 730 AddressSpace *as = g_new0(AddressSpace, 1); 731 char *as_name; 732 733 assert(mr); 734 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); 735 address_space_init(as, mr, as_name); 736 g_free(as_name); 737 738 /* Target code should have set num_ases before calling us */ 739 assert(asidx < cpu->num_ases); 740 741 if (asidx == 0) { 742 /* address space 0 gets the convenience alias */ 743 cpu->as = as; 744 } 745 746 /* KVM cannot currently support multiple address spaces. */ 747 assert(asidx == 0 || !kvm_enabled()); 748 749 if (!cpu->cpu_ases) { 750 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); 751 } 752 753 newas = &cpu->cpu_ases[asidx]; 754 newas->cpu = cpu; 755 newas->as = as; 756 if (tcg_enabled()) { 757 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; 758 newas->tcg_as_listener.commit = tcg_commit; 759 newas->tcg_as_listener.name = "tcg"; 760 memory_listener_register(&newas->tcg_as_listener, as); 761 } 762 } 763 764 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) 765 { 766 /* Return the AddressSpace corresponding to the specified index */ 767 return cpu->cpu_ases[asidx].as; 768 } 769 770 /* Add a watchpoint. */ 771 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, 772 int flags, CPUWatchpoint **watchpoint) 773 { 774 CPUWatchpoint *wp; 775 vaddr in_page; 776 777 /* forbid ranges which are empty or run off the end of the address space */ 778 if (len == 0 || (addr + len - 1) < addr) { 779 error_report("tried to set invalid watchpoint at %" 780 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); 781 return -EINVAL; 782 } 783 wp = g_malloc(sizeof(*wp)); 784 785 wp->vaddr = addr; 786 wp->len = len; 787 wp->flags = flags; 788 789 /* keep all GDB-injected watchpoints in front */ 790 if (flags & BP_GDB) { 791 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); 792 } else { 793 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); 794 } 795 796 in_page = -(addr | TARGET_PAGE_MASK); 797 if (len <= in_page) { 798 tlb_flush_page(cpu, addr); 799 } else { 800 tlb_flush(cpu); 801 } 802 803 if (watchpoint) 804 *watchpoint = wp; 805 return 0; 806 } 807 808 /* Remove a specific watchpoint. */ 809 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, 810 int flags) 811 { 812 CPUWatchpoint *wp; 813 814 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 815 if (addr == wp->vaddr && len == wp->len 816 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { 817 cpu_watchpoint_remove_by_ref(cpu, wp); 818 return 0; 819 } 820 } 821 return -ENOENT; 822 } 823 824 /* Remove a specific watchpoint by reference. */ 825 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) 826 { 827 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); 828 829 tlb_flush_page(cpu, watchpoint->vaddr); 830 831 g_free(watchpoint); 832 } 833 834 /* Remove all matching watchpoints. 
*/ 835 void cpu_watchpoint_remove_all(CPUState *cpu, int mask) 836 { 837 CPUWatchpoint *wp, *next; 838 839 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { 840 if (wp->flags & mask) { 841 cpu_watchpoint_remove_by_ref(cpu, wp); 842 } 843 } 844 } 845 846 #ifdef CONFIG_TCG 847 /* Return true if this watchpoint address matches the specified 848 * access (ie the address range covered by the watchpoint overlaps 849 * partially or completely with the address range covered by the 850 * access). 851 */ 852 static inline bool watchpoint_address_matches(CPUWatchpoint *wp, 853 vaddr addr, vaddr len) 854 { 855 /* We know the lengths are non-zero, but a little caution is 856 * required to avoid errors in the case where the range ends 857 * exactly at the top of the address space and so addr + len 858 * wraps round to zero. 859 */ 860 vaddr wpend = wp->vaddr + wp->len - 1; 861 vaddr addrend = addr + len - 1; 862 863 return !(addr > wpend || wp->vaddr > addrend); 864 } 865 866 /* Return flags for watchpoints that match addr + prot. */ 867 int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) 868 { 869 CPUWatchpoint *wp; 870 int ret = 0; 871 872 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 873 if (watchpoint_address_matches(wp, addr, len)) { 874 ret |= wp->flags; 875 } 876 } 877 return ret; 878 } 879 880 /* Generate a debug exception if a watchpoint has been hit. */ 881 void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, 882 MemTxAttrs attrs, int flags, uintptr_t ra) 883 { 884 CPUClass *cc = CPU_GET_CLASS(cpu); 885 CPUWatchpoint *wp; 886 887 assert(tcg_enabled()); 888 if (cpu->watchpoint_hit) { 889 /* 890 * We re-entered the check after replacing the TB. 891 * Now raise the debug interrupt so that it will 892 * trigger after the current instruction. 893 */ 894 qemu_mutex_lock_iothread(); 895 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); 896 qemu_mutex_unlock_iothread(); 897 return; 898 } 899 900 if (cc->tcg_ops->adjust_watchpoint_address) { 901 /* this is currently used only by ARM BE32 */ 902 addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); 903 } 904 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 905 if (watchpoint_address_matches(wp, addr, len) 906 && (wp->flags & flags)) { 907 if (replay_running_debug()) { 908 /* 909 * replay_breakpoint reads icount. 910 * Force recompile to succeed, because icount may 911 * be read only at the end of the block. 912 */ 913 if (!cpu->can_do_io) { 914 /* Force execution of one insn next time. */ 915 cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); 916 cpu_loop_exit_restore(cpu, ra); 917 } 918 /* 919 * Don't process the watchpoints when we are 920 * in a reverse debugging operation. 921 */ 922 replay_breakpoint(); 923 return; 924 } 925 if (flags == BP_MEM_READ) { 926 wp->flags |= BP_WATCHPOINT_HIT_READ; 927 } else { 928 wp->flags |= BP_WATCHPOINT_HIT_WRITE; 929 } 930 wp->hitaddr = MAX(addr, wp->vaddr); 931 wp->hitattrs = attrs; 932 933 if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && 934 !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { 935 wp->flags &= ~BP_WATCHPOINT_HIT; 936 continue; 937 } 938 cpu->watchpoint_hit = wp; 939 940 mmap_lock(); 941 /* This call also restores vCPU state */ 942 tb_check_watchpoint(cpu, ra); 943 if (wp->flags & BP_STOP_BEFORE_ACCESS) { 944 cpu->exception_index = EXCP_DEBUG; 945 mmap_unlock(); 946 cpu_loop_exit(cpu); 947 } else { 948 /* Force execution of one insn next time. 
*/ 949 cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu); 950 mmap_unlock(); 951 cpu_loop_exit_noexc(cpu); 952 } 953 } else { 954 wp->flags &= ~BP_WATCHPOINT_HIT; 955 } 956 } 957 } 958 959 #endif /* CONFIG_TCG */ 960 961 /* Called from RCU critical section */ 962 static RAMBlock *qemu_get_ram_block(ram_addr_t addr) 963 { 964 RAMBlock *block; 965 966 block = qatomic_rcu_read(&ram_list.mru_block); 967 if (block && addr - block->offset < block->max_length) { 968 return block; 969 } 970 RAMBLOCK_FOREACH(block) { 971 if (addr - block->offset < block->max_length) { 972 goto found; 973 } 974 } 975 976 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 977 abort(); 978 979 found: 980 /* It is safe to write mru_block outside the iothread lock. This 981 * is what happens: 982 * 983 * mru_block = xxx 984 * rcu_read_unlock() 985 * xxx removed from list 986 * rcu_read_lock() 987 * read mru_block 988 * mru_block = NULL; 989 * call_rcu(reclaim_ramblock, xxx); 990 * rcu_read_unlock() 991 * 992 * qatomic_rcu_set is not needed here. The block was already published 993 * when it was placed into the list. Here we're just making an extra 994 * copy of the pointer. 995 */ 996 ram_list.mru_block = block; 997 return block; 998 } 999 1000 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) 1001 { 1002 CPUState *cpu; 1003 ram_addr_t start1; 1004 RAMBlock *block; 1005 ram_addr_t end; 1006 1007 assert(tcg_enabled()); 1008 end = TARGET_PAGE_ALIGN(start + length); 1009 start &= TARGET_PAGE_MASK; 1010 1011 RCU_READ_LOCK_GUARD(); 1012 block = qemu_get_ram_block(start); 1013 assert(block == qemu_get_ram_block(end - 1)); 1014 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); 1015 CPU_FOREACH(cpu) { 1016 tlb_reset_dirty(cpu, start1, length); 1017 } 1018 } 1019 1020 /* Note: start and end must be within the same ram block. 
*/ 1021 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, 1022 ram_addr_t length, 1023 unsigned client) 1024 { 1025 DirtyMemoryBlocks *blocks; 1026 unsigned long end, page, start_page; 1027 bool dirty = false; 1028 RAMBlock *ramblock; 1029 uint64_t mr_offset, mr_size; 1030 1031 if (length == 0) { 1032 return false; 1033 } 1034 1035 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; 1036 start_page = start >> TARGET_PAGE_BITS; 1037 page = start_page; 1038 1039 WITH_RCU_READ_LOCK_GUARD() { 1040 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 1041 ramblock = qemu_get_ram_block(start); 1042 /* Range sanity check on the ramblock */ 1043 assert(start >= ramblock->offset && 1044 start + length <= ramblock->offset + ramblock->used_length); 1045 1046 while (page < end) { 1047 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 1048 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; 1049 unsigned long num = MIN(end - page, 1050 DIRTY_MEMORY_BLOCK_SIZE - offset); 1051 1052 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], 1053 offset, num); 1054 page += num; 1055 } 1056 1057 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; 1058 mr_size = (end - start_page) << TARGET_PAGE_BITS; 1059 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); 1060 } 1061 1062 if (dirty && tcg_enabled()) { 1063 tlb_reset_dirty_range_all(start, length); 1064 } 1065 1066 return dirty; 1067 } 1068 1069 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty 1070 (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) 1071 { 1072 DirtyMemoryBlocks *blocks; 1073 ram_addr_t start = memory_region_get_ram_addr(mr) + offset; 1074 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL); 1075 ram_addr_t first = QEMU_ALIGN_DOWN(start, align); 1076 ram_addr_t last = QEMU_ALIGN_UP(start + length, align); 1077 DirtyBitmapSnapshot *snap; 1078 unsigned long page, end, dest; 1079 1080 snap = g_malloc0(sizeof(*snap) + 1081 ((last - first) >> (TARGET_PAGE_BITS + 3))); 1082 snap->start = first; 1083 snap->end = last; 1084 1085 page = first >> TARGET_PAGE_BITS; 1086 end = last >> TARGET_PAGE_BITS; 1087 dest = 0; 1088 1089 WITH_RCU_READ_LOCK_GUARD() { 1090 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 1091 1092 while (page < end) { 1093 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 1094 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; 1095 unsigned long num = MIN(end - page, 1096 DIRTY_MEMORY_BLOCK_SIZE - offset); 1097 1098 assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL))); 1099 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL))); 1100 offset >>= BITS_PER_LEVEL; 1101 1102 bitmap_copy_and_clear_atomic(snap->dirty + dest, 1103 blocks->blocks[idx] + offset, 1104 num); 1105 page += num; 1106 dest += num >> BITS_PER_LEVEL; 1107 } 1108 } 1109 1110 if (tcg_enabled()) { 1111 tlb_reset_dirty_range_all(start, length); 1112 } 1113 1114 memory_region_clear_dirty_bitmap(mr, offset, length); 1115 1116 return snap; 1117 } 1118 1119 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, 1120 ram_addr_t start, 1121 ram_addr_t length) 1122 { 1123 unsigned long page, end; 1124 1125 assert(start >= snap->start); 1126 assert(start + length <= snap->end); 1127 1128 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; 1129 page = (start - snap->start) >> TARGET_PAGE_BITS; 1130 1131 while (page < end) { 1132 if (test_bit(page, snap->dirty)) { 1133 return true; 1134 } 1135 page++; 1136 } 
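    /* Descriptive comment (added): no page in the requested range was marked
     * dirty in the snapshot, so report the range as clean.
     */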
1137 return false; 1138 } 1139 1140 /* Called from RCU critical section */ 1141 hwaddr memory_region_section_get_iotlb(CPUState *cpu, 1142 MemoryRegionSection *section) 1143 { 1144 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); 1145 return section - d->map.sections; 1146 } 1147 1148 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 1149 uint16_t section); 1150 static subpage_t *subpage_init(FlatView *fv, hwaddr base); 1151 1152 static uint16_t phys_section_add(PhysPageMap *map, 1153 MemoryRegionSection *section) 1154 { 1155 /* The physical section number is ORed with a page-aligned 1156 * pointer to produce the iotlb entries. Thus it should 1157 * never overflow into the page-aligned value. 1158 */ 1159 assert(map->sections_nb < TARGET_PAGE_SIZE); 1160 1161 if (map->sections_nb == map->sections_nb_alloc) { 1162 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); 1163 map->sections = g_renew(MemoryRegionSection, map->sections, 1164 map->sections_nb_alloc); 1165 } 1166 map->sections[map->sections_nb] = *section; 1167 memory_region_ref(section->mr); 1168 return map->sections_nb++; 1169 } 1170 1171 static void phys_section_destroy(MemoryRegion *mr) 1172 { 1173 bool have_sub_page = mr->subpage; 1174 1175 memory_region_unref(mr); 1176 1177 if (have_sub_page) { 1178 subpage_t *subpage = container_of(mr, subpage_t, iomem); 1179 object_unref(OBJECT(&subpage->iomem)); 1180 g_free(subpage); 1181 } 1182 } 1183 1184 static void phys_sections_free(PhysPageMap *map) 1185 { 1186 while (map->sections_nb > 0) { 1187 MemoryRegionSection *section = &map->sections[--map->sections_nb]; 1188 phys_section_destroy(section->mr); 1189 } 1190 g_free(map->sections); 1191 g_free(map->nodes); 1192 } 1193 1194 static void register_subpage(FlatView *fv, MemoryRegionSection *section) 1195 { 1196 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1197 subpage_t *subpage; 1198 hwaddr base = section->offset_within_address_space 1199 & TARGET_PAGE_MASK; 1200 MemoryRegionSection *existing = phys_page_find(d, base); 1201 MemoryRegionSection subsection = { 1202 .offset_within_address_space = base, 1203 .size = int128_make64(TARGET_PAGE_SIZE), 1204 }; 1205 hwaddr start, end; 1206 1207 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); 1208 1209 if (!(existing->mr->subpage)) { 1210 subpage = subpage_init(fv, base); 1211 subsection.fv = fv; 1212 subsection.mr = &subpage->iomem; 1213 phys_page_set(d, base >> TARGET_PAGE_BITS, 1, 1214 phys_section_add(&d->map, &subsection)); 1215 } else { 1216 subpage = container_of(existing->mr, subpage_t, iomem); 1217 } 1218 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; 1219 end = start + int128_get64(section->size) - 1; 1220 subpage_register(subpage, start, end, 1221 phys_section_add(&d->map, section)); 1222 } 1223 1224 1225 static void register_multipage(FlatView *fv, 1226 MemoryRegionSection *section) 1227 { 1228 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1229 hwaddr start_addr = section->offset_within_address_space; 1230 uint16_t section_index = phys_section_add(&d->map, section); 1231 uint64_t num_pages = int128_get64(int128_rshift(section->size, 1232 TARGET_PAGE_BITS)); 1233 1234 assert(num_pages); 1235 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); 1236 } 1237 1238 /* 1239 * The range in *section* may look like this: 1240 * 1241 * |s|PPPPPPP|s| 1242 * 1243 * where s stands for subpage and P for page. 
1244 */ 1245 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section) 1246 { 1247 MemoryRegionSection remain = *section; 1248 Int128 page_size = int128_make64(TARGET_PAGE_SIZE); 1249 1250 /* register first subpage */ 1251 if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { 1252 uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) 1253 - remain.offset_within_address_space; 1254 1255 MemoryRegionSection now = remain; 1256 now.size = int128_min(int128_make64(left), now.size); 1257 register_subpage(fv, &now); 1258 if (int128_eq(remain.size, now.size)) { 1259 return; 1260 } 1261 remain.size = int128_sub(remain.size, now.size); 1262 remain.offset_within_address_space += int128_get64(now.size); 1263 remain.offset_within_region += int128_get64(now.size); 1264 } 1265 1266 /* register whole pages */ 1267 if (int128_ge(remain.size, page_size)) { 1268 MemoryRegionSection now = remain; 1269 now.size = int128_and(now.size, int128_neg(page_size)); 1270 register_multipage(fv, &now); 1271 if (int128_eq(remain.size, now.size)) { 1272 return; 1273 } 1274 remain.size = int128_sub(remain.size, now.size); 1275 remain.offset_within_address_space += int128_get64(now.size); 1276 remain.offset_within_region += int128_get64(now.size); 1277 } 1278 1279 /* register last subpage */ 1280 register_subpage(fv, &remain); 1281 } 1282 1283 void qemu_flush_coalesced_mmio_buffer(void) 1284 { 1285 if (kvm_enabled()) 1286 kvm_flush_coalesced_mmio_buffer(); 1287 } 1288 1289 void qemu_mutex_lock_ramlist(void) 1290 { 1291 qemu_mutex_lock(&ram_list.mutex); 1292 } 1293 1294 void qemu_mutex_unlock_ramlist(void) 1295 { 1296 qemu_mutex_unlock(&ram_list.mutex); 1297 } 1298 1299 GString *ram_block_format(void) 1300 { 1301 RAMBlock *block; 1302 char *psize; 1303 GString *buf = g_string_new(""); 1304 1305 RCU_READ_LOCK_GUARD(); 1306 g_string_append_printf(buf, "%24s %8s %18s %18s %18s\n", 1307 "Block Name", "PSize", "Offset", "Used", "Total"); 1308 RAMBLOCK_FOREACH(block) { 1309 psize = size_to_str(block->page_size); 1310 g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 1311 " 0x%016" PRIx64 "\n", block->idstr, psize, 1312 (uint64_t)block->offset, 1313 (uint64_t)block->used_length, 1314 (uint64_t)block->max_length); 1315 g_free(psize); 1316 } 1317 1318 return buf; 1319 } 1320 1321 #ifdef __linux__ 1322 /* 1323 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which 1324 * may or may not name the same files / on the same filesystem now as 1325 * when we actually open and map them. Iterate over the file 1326 * descriptors instead, and use qemu_fd_getpagesize(). 
1327 */ 1328 static int find_min_backend_pagesize(Object *obj, void *opaque) 1329 { 1330 long *hpsize_min = opaque; 1331 1332 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1333 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1334 long hpsize = host_memory_backend_pagesize(backend); 1335 1336 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { 1337 *hpsize_min = hpsize; 1338 } 1339 } 1340 1341 return 0; 1342 } 1343 1344 static int find_max_backend_pagesize(Object *obj, void *opaque) 1345 { 1346 long *hpsize_max = opaque; 1347 1348 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1349 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1350 long hpsize = host_memory_backend_pagesize(backend); 1351 1352 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { 1353 *hpsize_max = hpsize; 1354 } 1355 } 1356 1357 return 0; 1358 } 1359 1360 /* 1361 * TODO: We assume right now that all mapped host memory backends are 1362 * used as RAM, however some might be used for different purposes. 1363 */ 1364 long qemu_minrampagesize(void) 1365 { 1366 long hpsize = LONG_MAX; 1367 Object *memdev_root = object_resolve_path("/objects", NULL); 1368 1369 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); 1370 return hpsize; 1371 } 1372 1373 long qemu_maxrampagesize(void) 1374 { 1375 long pagesize = 0; 1376 Object *memdev_root = object_resolve_path("/objects", NULL); 1377 1378 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); 1379 return pagesize; 1380 } 1381 #else 1382 long qemu_minrampagesize(void) 1383 { 1384 return qemu_real_host_page_size; 1385 } 1386 long qemu_maxrampagesize(void) 1387 { 1388 return qemu_real_host_page_size; 1389 } 1390 #endif 1391 1392 #ifdef CONFIG_POSIX 1393 static int64_t get_file_size(int fd) 1394 { 1395 int64_t size; 1396 #if defined(__linux__) 1397 struct stat st; 1398 1399 if (fstat(fd, &st) < 0) { 1400 return -errno; 1401 } 1402 1403 /* Special handling for devdax character devices */ 1404 if (S_ISCHR(st.st_mode)) { 1405 g_autofree char *subsystem_path = NULL; 1406 g_autofree char *subsystem = NULL; 1407 1408 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", 1409 major(st.st_rdev), minor(st.st_rdev)); 1410 subsystem = g_file_read_link(subsystem_path, NULL); 1411 1412 if (subsystem && g_str_has_suffix(subsystem, "/dax")) { 1413 g_autofree char *size_path = NULL; 1414 g_autofree char *size_str = NULL; 1415 1416 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", 1417 major(st.st_rdev), minor(st.st_rdev)); 1418 1419 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { 1420 return g_ascii_strtoll(size_str, NULL, 0); 1421 } 1422 } 1423 } 1424 #endif /* defined(__linux__) */ 1425 1426 /* st.st_size may be zero for special files yet lseek(2) works */ 1427 size = lseek(fd, 0, SEEK_END); 1428 if (size < 0) { 1429 return -errno; 1430 } 1431 return size; 1432 } 1433 1434 static int64_t get_file_align(int fd) 1435 { 1436 int64_t align = -1; 1437 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) 1438 struct stat st; 1439 1440 if (fstat(fd, &st) < 0) { 1441 return -errno; 1442 } 1443 1444 /* Special handling for devdax character devices */ 1445 if (S_ISCHR(st.st_mode)) { 1446 g_autofree char *path = NULL; 1447 g_autofree char *rpath = NULL; 1448 struct daxctl_ctx *ctx; 1449 struct daxctl_region *region; 1450 int rc = 0; 1451 1452 path = g_strdup_printf("/sys/dev/char/%d:%d", 1453 major(st.st_rdev), minor(st.st_rdev)); 1454 rpath = realpath(path, NULL); 1455 if (!rpath) { 1456 
return -errno; 1457 } 1458 1459 rc = daxctl_new(&ctx); 1460 if (rc) { 1461 return -1; 1462 } 1463 1464 daxctl_region_foreach(ctx, region) { 1465 if (strstr(rpath, daxctl_region_get_path(region))) { 1466 align = daxctl_region_get_align(region); 1467 break; 1468 } 1469 } 1470 daxctl_unref(ctx); 1471 } 1472 #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */ 1473 1474 return align; 1475 } 1476 1477 static int file_ram_open(const char *path, 1478 const char *region_name, 1479 bool readonly, 1480 bool *created, 1481 Error **errp) 1482 { 1483 char *filename; 1484 char *sanitized_name; 1485 char *c; 1486 int fd = -1; 1487 1488 *created = false; 1489 for (;;) { 1490 fd = open(path, readonly ? O_RDONLY : O_RDWR); 1491 if (fd >= 0) { 1492 /* @path names an existing file, use it */ 1493 break; 1494 } 1495 if (errno == ENOENT) { 1496 /* @path names a file that doesn't exist, create it */ 1497 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); 1498 if (fd >= 0) { 1499 *created = true; 1500 break; 1501 } 1502 } else if (errno == EISDIR) { 1503 /* @path names a directory, create a file there */ 1504 /* Make name safe to use with mkstemp by replacing '/' with '_'. */ 1505 sanitized_name = g_strdup(region_name); 1506 for (c = sanitized_name; *c != '\0'; c++) { 1507 if (*c == '/') { 1508 *c = '_'; 1509 } 1510 } 1511 1512 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, 1513 sanitized_name); 1514 g_free(sanitized_name); 1515 1516 fd = mkstemp(filename); 1517 if (fd >= 0) { 1518 unlink(filename); 1519 g_free(filename); 1520 break; 1521 } 1522 g_free(filename); 1523 } 1524 if (errno != EEXIST && errno != EINTR) { 1525 error_setg_errno(errp, errno, 1526 "can't open backing store %s for guest RAM", 1527 path); 1528 return -1; 1529 } 1530 /* 1531 * Try again on EINTR and EEXIST. The latter happens when 1532 * something else creates the file between our two open(). 1533 */ 1534 } 1535 1536 return fd; 1537 } 1538 1539 static void *file_ram_alloc(RAMBlock *block, 1540 ram_addr_t memory, 1541 int fd, 1542 bool readonly, 1543 bool truncate, 1544 off_t offset, 1545 Error **errp) 1546 { 1547 uint32_t qemu_map_flags; 1548 void *area; 1549 1550 block->page_size = qemu_fd_getpagesize(fd); 1551 if (block->mr->align % block->page_size) { 1552 error_setg(errp, "alignment 0x%" PRIx64 1553 " must be multiples of page size 0x%zx", 1554 block->mr->align, block->page_size); 1555 return NULL; 1556 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { 1557 error_setg(errp, "alignment 0x%" PRIx64 1558 " must be a power of two", block->mr->align); 1559 return NULL; 1560 } 1561 block->mr->align = MAX(block->page_size, block->mr->align); 1562 #if defined(__s390x__) 1563 if (kvm_enabled()) { 1564 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); 1565 } 1566 #endif 1567 1568 if (memory < block->page_size) { 1569 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " 1570 "or larger than page size 0x%zx", 1571 memory, block->page_size); 1572 return NULL; 1573 } 1574 1575 memory = ROUND_UP(memory, block->page_size); 1576 1577 /* 1578 * ftruncate is not supported by hugetlbfs in older 1579 * hosts, so don't bother bailing out on errors. 1580 * If anything goes wrong with it under other filesystems, 1581 * mmap will fail. 1582 * 1583 * Do not truncate the non-empty backend file to avoid corrupting 1584 * the existing data in the file. Disabling shrinking is not 1585 * enough. 
For example, the current vNVDIMM implementation stores 1586 * the guest NVDIMM labels at the end of the backend file. If the 1587 * backend file is later extended, QEMU will not be able to find 1588 * those labels. Therefore, extending the non-empty backend file 1589 * is disabled as well. 1590 */ 1591 if (truncate && ftruncate(fd, memory)) { 1592 perror("ftruncate"); 1593 } 1594 1595 qemu_map_flags = readonly ? QEMU_MAP_READONLY : 0; 1596 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; 1597 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; 1598 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; 1599 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); 1600 if (area == MAP_FAILED) { 1601 error_setg_errno(errp, errno, 1602 "unable to map backing store for guest RAM"); 1603 return NULL; 1604 } 1605 1606 block->fd = fd; 1607 return area; 1608 } 1609 #endif 1610 1611 /* Allocate space within the ram_addr_t space that governs the 1612 * dirty bitmaps. 1613 * Called with the ramlist lock held. 1614 */ 1615 static ram_addr_t find_ram_offset(ram_addr_t size) 1616 { 1617 RAMBlock *block, *next_block; 1618 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; 1619 1620 assert(size != 0); /* it would hand out same offset multiple times */ 1621 1622 if (QLIST_EMPTY_RCU(&ram_list.blocks)) { 1623 return 0; 1624 } 1625 1626 RAMBLOCK_FOREACH(block) { 1627 ram_addr_t candidate, next = RAM_ADDR_MAX; 1628 1629 /* Align blocks to start on a 'long' in the bitmap 1630 * which makes the bitmap sync'ing take the fast path. 1631 */ 1632 candidate = block->offset + block->max_length; 1633 candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); 1634 1635 /* Search for the closest following block 1636 * and find the gap. 1637 */ 1638 RAMBLOCK_FOREACH(next_block) { 1639 if (next_block->offset >= candidate) { 1640 next = MIN(next, next_block->offset); 1641 } 1642 } 1643 1644 /* If it fits remember our place and remember the size 1645 * of gap, but keep going so that we might find a smaller 1646 * gap to fill so avoiding fragmentation. 
1647 */ 1648 if (next - candidate >= size && next - candidate < mingap) { 1649 offset = candidate; 1650 mingap = next - candidate; 1651 } 1652 1653 trace_find_ram_offset_loop(size, candidate, offset, next, mingap); 1654 } 1655 1656 if (offset == RAM_ADDR_MAX) { 1657 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", 1658 (uint64_t)size); 1659 abort(); 1660 } 1661 1662 trace_find_ram_offset(size, offset); 1663 1664 return offset; 1665 } 1666 1667 static unsigned long last_ram_page(void) 1668 { 1669 RAMBlock *block; 1670 ram_addr_t last = 0; 1671 1672 RCU_READ_LOCK_GUARD(); 1673 RAMBLOCK_FOREACH(block) { 1674 last = MAX(last, block->offset + block->max_length); 1675 } 1676 return last >> TARGET_PAGE_BITS; 1677 } 1678 1679 static void qemu_ram_setup_dump(void *addr, ram_addr_t size) 1680 { 1681 int ret; 1682 1683 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ 1684 if (!machine_dump_guest_core(current_machine)) { 1685 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); 1686 if (ret) { 1687 perror("qemu_madvise"); 1688 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " 1689 "but dump_guest_core=off specified\n"); 1690 } 1691 } 1692 } 1693 1694 const char *qemu_ram_get_idstr(RAMBlock *rb) 1695 { 1696 return rb->idstr; 1697 } 1698 1699 void *qemu_ram_get_host_addr(RAMBlock *rb) 1700 { 1701 return rb->host; 1702 } 1703 1704 ram_addr_t qemu_ram_get_offset(RAMBlock *rb) 1705 { 1706 return rb->offset; 1707 } 1708 1709 ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) 1710 { 1711 return rb->used_length; 1712 } 1713 1714 ram_addr_t qemu_ram_get_max_length(RAMBlock *rb) 1715 { 1716 return rb->max_length; 1717 } 1718 1719 bool qemu_ram_is_shared(RAMBlock *rb) 1720 { 1721 return rb->flags & RAM_SHARED; 1722 } 1723 1724 bool qemu_ram_is_noreserve(RAMBlock *rb) 1725 { 1726 return rb->flags & RAM_NORESERVE; 1727 } 1728 1729 /* Note: Only set at the start of postcopy */ 1730 bool qemu_ram_is_uf_zeroable(RAMBlock *rb) 1731 { 1732 return rb->flags & RAM_UF_ZEROPAGE; 1733 } 1734 1735 void qemu_ram_set_uf_zeroable(RAMBlock *rb) 1736 { 1737 rb->flags |= RAM_UF_ZEROPAGE; 1738 } 1739 1740 bool qemu_ram_is_migratable(RAMBlock *rb) 1741 { 1742 return rb->flags & RAM_MIGRATABLE; 1743 } 1744 1745 void qemu_ram_set_migratable(RAMBlock *rb) 1746 { 1747 rb->flags |= RAM_MIGRATABLE; 1748 } 1749 1750 void qemu_ram_unset_migratable(RAMBlock *rb) 1751 { 1752 rb->flags &= ~RAM_MIGRATABLE; 1753 } 1754 1755 /* Called with iothread lock held. */ 1756 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) 1757 { 1758 RAMBlock *block; 1759 1760 assert(new_block); 1761 assert(!new_block->idstr[0]); 1762 1763 if (dev) { 1764 char *id = qdev_get_dev_path(dev); 1765 if (id) { 1766 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 1767 g_free(id); 1768 } 1769 } 1770 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 1771 1772 RCU_READ_LOCK_GUARD(); 1773 RAMBLOCK_FOREACH(block) { 1774 if (block != new_block && 1775 !strcmp(block->idstr, new_block->idstr)) { 1776 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 1777 new_block->idstr); 1778 abort(); 1779 } 1780 } 1781 } 1782 1783 /* Called with iothread lock held. */ 1784 void qemu_ram_unset_idstr(RAMBlock *block) 1785 { 1786 /* FIXME: arch_init.c assumes that this is not called throughout 1787 * migration. Ignore the problem since hot-unplug during migration 1788 * does not work anyway. 
1789 */ 1790 if (block) { 1791 memset(block->idstr, 0, sizeof(block->idstr)); 1792 } 1793 } 1794 1795 size_t qemu_ram_pagesize(RAMBlock *rb) 1796 { 1797 return rb->page_size; 1798 } 1799 1800 /* Returns the largest size of page in use */ 1801 size_t qemu_ram_pagesize_largest(void) 1802 { 1803 RAMBlock *block; 1804 size_t largest = 0; 1805 1806 RAMBLOCK_FOREACH(block) { 1807 largest = MAX(largest, qemu_ram_pagesize(block)); 1808 } 1809 1810 return largest; 1811 } 1812 1813 static int memory_try_enable_merging(void *addr, size_t len) 1814 { 1815 if (!machine_mem_merge(current_machine)) { 1816 /* disabled by the user */ 1817 return 0; 1818 } 1819 1820 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); 1821 } 1822 1823 /* 1824 * Resizing RAM while migrating can result in the migration being canceled. 1825 * Care has to be taken if the guest might have already detected the memory. 1826 * 1827 * As memory core doesn't know how is memory accessed, it is up to 1828 * resize callback to update device state and/or add assertions to detect 1829 * misuse, if necessary. 1830 */ 1831 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) 1832 { 1833 const ram_addr_t oldsize = block->used_length; 1834 const ram_addr_t unaligned_size = newsize; 1835 1836 assert(block); 1837 1838 newsize = HOST_PAGE_ALIGN(newsize); 1839 1840 if (block->used_length == newsize) { 1841 /* 1842 * We don't have to resize the ram block (which only knows aligned 1843 * sizes), however, we have to notify if the unaligned size changed. 1844 */ 1845 if (unaligned_size != memory_region_size(block->mr)) { 1846 memory_region_set_size(block->mr, unaligned_size); 1847 if (block->resized) { 1848 block->resized(block->idstr, unaligned_size, block->host); 1849 } 1850 } 1851 return 0; 1852 } 1853 1854 if (!(block->flags & RAM_RESIZEABLE)) { 1855 error_setg_errno(errp, EINVAL, 1856 "Size mismatch: %s: 0x" RAM_ADDR_FMT 1857 " != 0x" RAM_ADDR_FMT, block->idstr, 1858 newsize, block->used_length); 1859 return -EINVAL; 1860 } 1861 1862 if (block->max_length < newsize) { 1863 error_setg_errno(errp, EINVAL, 1864 "Size too large: %s: 0x" RAM_ADDR_FMT 1865 " > 0x" RAM_ADDR_FMT, block->idstr, 1866 newsize, block->max_length); 1867 return -EINVAL; 1868 } 1869 1870 /* Notify before modifying the ram block and touching the bitmaps. */ 1871 if (block->host) { 1872 ram_block_notify_resize(block->host, oldsize, newsize); 1873 } 1874 1875 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); 1876 block->used_length = newsize; 1877 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, 1878 DIRTY_CLIENTS_ALL); 1879 memory_region_set_size(block->mr, unaligned_size); 1880 if (block->resized) { 1881 block->resized(block->idstr, unaligned_size, block->host); 1882 } 1883 return 0; 1884 } 1885 1886 /* 1887 * Trigger sync on the given ram block for range [start, start + length] 1888 * with the backing store if one is available. 1889 * Otherwise no-op. 1890 * @Note: this is supposed to be a synchronous op. 
1891 */ 1892 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) 1893 { 1894 /* The requested range should fit in within the block range */ 1895 g_assert((start + length) <= block->used_length); 1896 1897 #ifdef CONFIG_LIBPMEM 1898 /* The lack of support for pmem should not block the sync */ 1899 if (ramblock_is_pmem(block)) { 1900 void *addr = ramblock_ptr(block, start); 1901 pmem_persist(addr, length); 1902 return; 1903 } 1904 #endif 1905 if (block->fd >= 0) { 1906 /** 1907 * Case there is no support for PMEM or the memory has not been 1908 * specified as persistent (or is not one) - use the msync. 1909 * Less optimal but still achieves the same goal 1910 */ 1911 void *addr = ramblock_ptr(block, start); 1912 if (qemu_msync(addr, length, block->fd)) { 1913 warn_report("%s: failed to sync memory range: start: " 1914 RAM_ADDR_FMT " length: " RAM_ADDR_FMT, 1915 __func__, start, length); 1916 } 1917 } 1918 } 1919 1920 /* Called with ram_list.mutex held */ 1921 static void dirty_memory_extend(ram_addr_t old_ram_size, 1922 ram_addr_t new_ram_size) 1923 { 1924 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, 1925 DIRTY_MEMORY_BLOCK_SIZE); 1926 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, 1927 DIRTY_MEMORY_BLOCK_SIZE); 1928 int i; 1929 1930 /* Only need to extend if block count increased */ 1931 if (new_num_blocks <= old_num_blocks) { 1932 return; 1933 } 1934 1935 for (i = 0; i < DIRTY_MEMORY_NUM; i++) { 1936 DirtyMemoryBlocks *old_blocks; 1937 DirtyMemoryBlocks *new_blocks; 1938 int j; 1939 1940 old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]); 1941 new_blocks = g_malloc(sizeof(*new_blocks) + 1942 sizeof(new_blocks->blocks[0]) * new_num_blocks); 1943 1944 if (old_num_blocks) { 1945 memcpy(new_blocks->blocks, old_blocks->blocks, 1946 old_num_blocks * sizeof(old_blocks->blocks[0])); 1947 } 1948 1949 for (j = old_num_blocks; j < new_num_blocks; j++) { 1950 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); 1951 } 1952 1953 qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); 1954 1955 if (old_blocks) { 1956 g_free_rcu(old_blocks, rcu); 1957 } 1958 } 1959 } 1960 1961 static void ram_block_add(RAMBlock *new_block, Error **errp) 1962 { 1963 const bool noreserve = qemu_ram_is_noreserve(new_block); 1964 const bool shared = qemu_ram_is_shared(new_block); 1965 RAMBlock *block; 1966 RAMBlock *last_block = NULL; 1967 ram_addr_t old_ram_size, new_ram_size; 1968 Error *err = NULL; 1969 1970 old_ram_size = last_ram_page(); 1971 1972 qemu_mutex_lock_ramlist(); 1973 new_block->offset = find_ram_offset(new_block->max_length); 1974 1975 if (!new_block->host) { 1976 if (xen_enabled()) { 1977 xen_ram_alloc(new_block->offset, new_block->max_length, 1978 new_block->mr, &err); 1979 if (err) { 1980 error_propagate(errp, err); 1981 qemu_mutex_unlock_ramlist(); 1982 return; 1983 } 1984 } else { 1985 new_block->host = qemu_anon_ram_alloc(new_block->max_length, 1986 &new_block->mr->align, 1987 shared, noreserve); 1988 if (!new_block->host) { 1989 error_setg_errno(errp, errno, 1990 "cannot set up guest memory '%s'", 1991 memory_region_name(new_block->mr)); 1992 qemu_mutex_unlock_ramlist(); 1993 return; 1994 } 1995 memory_try_enable_merging(new_block->host, new_block->max_length); 1996 } 1997 } 1998 1999 new_ram_size = MAX(old_ram_size, 2000 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); 2001 if (new_ram_size > old_ram_size) { 2002 dirty_memory_extend(old_ram_size, new_ram_size); 2003 } 2004 /* Keep the list sorted from biggest to smallest block. 
     * Unlike QTAILQ, QLIST (which has an RCU-friendly variant) does not have
     * insertion at tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /*
         * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
         * MMU.  Configure it unless the machine is a qtest server, in which
         * case KVM is not used and the process may be forked (e.g. for
         * fuzzing purposes).
         */
        if (!qtest_enabled()) {
            qemu_madvise(new_block->host, new_block->max_length,
                         QEMU_MADV_DONTFORK);
        }
        ram_block_notify_add(new_block->host, new_block->used_length,
                             new_block->max_length);
    }
}

#ifdef CONFIG_POSIX
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 bool readonly, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;
    int64_t file_size, file_align;

    /* Only the following ram flags are supported for now.
*/ 2059 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | 2060 RAM_PROTECTED)) == 0); 2061 2062 if (xen_enabled()) { 2063 error_setg(errp, "-mem-path not supported with Xen"); 2064 return NULL; 2065 } 2066 2067 if (kvm_enabled() && !kvm_has_sync_mmu()) { 2068 error_setg(errp, 2069 "host lacks kvm mmu notifiers, -mem-path unsupported"); 2070 return NULL; 2071 } 2072 2073 size = HOST_PAGE_ALIGN(size); 2074 file_size = get_file_size(fd); 2075 if (file_size > 0 && file_size < size) { 2076 error_setg(errp, "backing store size 0x%" PRIx64 2077 " does not match 'size' option 0x" RAM_ADDR_FMT, 2078 file_size, size); 2079 return NULL; 2080 } 2081 2082 file_align = get_file_align(fd); 2083 if (file_align > 0 && file_align > mr->align) { 2084 error_setg(errp, "backing store align 0x%" PRIx64 2085 " is larger than 'align' option 0x%" PRIx64, 2086 file_align, mr->align); 2087 return NULL; 2088 } 2089 2090 new_block = g_malloc0(sizeof(*new_block)); 2091 new_block->mr = mr; 2092 new_block->used_length = size; 2093 new_block->max_length = size; 2094 new_block->flags = ram_flags; 2095 new_block->host = file_ram_alloc(new_block, size, fd, readonly, 2096 !file_size, offset, errp); 2097 if (!new_block->host) { 2098 g_free(new_block); 2099 return NULL; 2100 } 2101 2102 ram_block_add(new_block, &local_err); 2103 if (local_err) { 2104 g_free(new_block); 2105 error_propagate(errp, local_err); 2106 return NULL; 2107 } 2108 return new_block; 2109 2110 } 2111 2112 2113 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 2114 uint32_t ram_flags, const char *mem_path, 2115 bool readonly, Error **errp) 2116 { 2117 int fd; 2118 bool created; 2119 RAMBlock *block; 2120 2121 fd = file_ram_open(mem_path, memory_region_name(mr), readonly, &created, 2122 errp); 2123 if (fd < 0) { 2124 return NULL; 2125 } 2126 2127 block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, 0, readonly, errp); 2128 if (!block) { 2129 if (created) { 2130 unlink(mem_path); 2131 } 2132 close(fd); 2133 return NULL; 2134 } 2135 2136 return block; 2137 } 2138 #endif 2139 2140 static 2141 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 2142 void (*resized)(const char*, 2143 uint64_t length, 2144 void *host), 2145 void *host, uint32_t ram_flags, 2146 MemoryRegion *mr, Error **errp) 2147 { 2148 RAMBlock *new_block; 2149 Error *local_err = NULL; 2150 2151 assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | 2152 RAM_NORESERVE)) == 0); 2153 assert(!host ^ (ram_flags & RAM_PREALLOC)); 2154 2155 size = HOST_PAGE_ALIGN(size); 2156 max_size = HOST_PAGE_ALIGN(max_size); 2157 new_block = g_malloc0(sizeof(*new_block)); 2158 new_block->mr = mr; 2159 new_block->resized = resized; 2160 new_block->used_length = size; 2161 new_block->max_length = max_size; 2162 assert(max_size >= size); 2163 new_block->fd = -1; 2164 new_block->page_size = qemu_real_host_page_size; 2165 new_block->host = host; 2166 new_block->flags = ram_flags; 2167 ram_block_add(new_block, &local_err); 2168 if (local_err) { 2169 g_free(new_block); 2170 error_propagate(errp, local_err); 2171 return NULL; 2172 } 2173 return new_block; 2174 } 2175 2176 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2177 MemoryRegion *mr, Error **errp) 2178 { 2179 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2180 errp); 2181 } 2182 2183 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2184 MemoryRegion *mr, Error **errp) 2185 { 2186 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE)) == 0); 2187 
return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2188 } 2189 2190 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2191 void (*resized)(const char*, 2192 uint64_t length, 2193 void *host), 2194 MemoryRegion *mr, Error **errp) 2195 { 2196 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2197 RAM_RESIZEABLE, mr, errp); 2198 } 2199 2200 static void reclaim_ramblock(RAMBlock *block) 2201 { 2202 if (block->flags & RAM_PREALLOC) { 2203 ; 2204 } else if (xen_enabled()) { 2205 xen_invalidate_map_cache_entry(block->host); 2206 #ifndef _WIN32 2207 } else if (block->fd >= 0) { 2208 qemu_ram_munmap(block->fd, block->host, block->max_length); 2209 close(block->fd); 2210 #endif 2211 } else { 2212 qemu_anon_ram_free(block->host, block->max_length); 2213 } 2214 g_free(block); 2215 } 2216 2217 void qemu_ram_free(RAMBlock *block) 2218 { 2219 if (!block) { 2220 return; 2221 } 2222 2223 if (block->host) { 2224 ram_block_notify_remove(block->host, block->used_length, 2225 block->max_length); 2226 } 2227 2228 qemu_mutex_lock_ramlist(); 2229 QLIST_REMOVE_RCU(block, next); 2230 ram_list.mru_block = NULL; 2231 /* Write list before version */ 2232 smp_wmb(); 2233 ram_list.version++; 2234 call_rcu(block, reclaim_ramblock, rcu); 2235 qemu_mutex_unlock_ramlist(); 2236 } 2237 2238 #ifndef _WIN32 2239 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 2240 { 2241 RAMBlock *block; 2242 ram_addr_t offset; 2243 int flags; 2244 void *area, *vaddr; 2245 2246 RAMBLOCK_FOREACH(block) { 2247 offset = addr - block->offset; 2248 if (offset < block->max_length) { 2249 vaddr = ramblock_ptr(block, offset); 2250 if (block->flags & RAM_PREALLOC) { 2251 ; 2252 } else if (xen_enabled()) { 2253 abort(); 2254 } else { 2255 flags = MAP_FIXED; 2256 flags |= block->flags & RAM_SHARED ? 2257 MAP_SHARED : MAP_PRIVATE; 2258 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; 2259 if (block->fd >= 0) { 2260 area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 2261 flags, block->fd, offset); 2262 } else { 2263 flags |= MAP_ANONYMOUS; 2264 area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 2265 flags, -1, 0); 2266 } 2267 if (area != vaddr) { 2268 error_report("Could not remap addr: " 2269 RAM_ADDR_FMT "@" RAM_ADDR_FMT "", 2270 length, addr); 2271 exit(1); 2272 } 2273 memory_try_enable_merging(vaddr, length); 2274 qemu_ram_setup_dump(vaddr, length); 2275 } 2276 } 2277 } 2278 } 2279 #endif /* !_WIN32 */ 2280 2281 /* Return a host pointer to ram allocated with qemu_ram_alloc. 2282 * This should not be used for general purpose DMA. Use address_space_map 2283 * or address_space_rw instead. For local memory (e.g. video ram) that the 2284 * device owns, use memory_region_get_ram_ptr. 2285 * 2286 * Called within RCU critical section. 2287 */ 2288 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) 2289 { 2290 RAMBlock *block = ram_block; 2291 2292 if (block == NULL) { 2293 block = qemu_get_ram_block(addr); 2294 addr -= block->offset; 2295 } 2296 2297 if (xen_enabled() && block->host == NULL) { 2298 /* We need to check if the requested address is in the RAM 2299 * because we don't want to map the entire memory in QEMU. 2300 * In that case just map until the end of the page. 2301 */ 2302 if (block->offset == 0) { 2303 return xen_map_cache(addr, 0, 0, false); 2304 } 2305 2306 block->host = xen_map_cache(block->offset, block->max_length, 1, false); 2307 } 2308 return ramblock_ptr(block, addr); 2309 } 2310 2311 /* Return a host pointer to guest's ram. 
Similar to qemu_map_ram_ptr 2312 * but takes a size argument. 2313 * 2314 * Called within RCU critical section. 2315 */ 2316 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, 2317 hwaddr *size, bool lock) 2318 { 2319 RAMBlock *block = ram_block; 2320 if (*size == 0) { 2321 return NULL; 2322 } 2323 2324 if (block == NULL) { 2325 block = qemu_get_ram_block(addr); 2326 addr -= block->offset; 2327 } 2328 *size = MIN(*size, block->max_length - addr); 2329 2330 if (xen_enabled() && block->host == NULL) { 2331 /* We need to check if the requested address is in the RAM 2332 * because we don't want to map the entire memory in QEMU. 2333 * In that case just map the requested area. 2334 */ 2335 if (block->offset == 0) { 2336 return xen_map_cache(addr, *size, lock, lock); 2337 } 2338 2339 block->host = xen_map_cache(block->offset, block->max_length, 1, lock); 2340 } 2341 2342 return ramblock_ptr(block, addr); 2343 } 2344 2345 /* Return the offset of a hostpointer within a ramblock */ 2346 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) 2347 { 2348 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; 2349 assert((uintptr_t)host >= (uintptr_t)rb->host); 2350 assert(res < rb->max_length); 2351 2352 return res; 2353 } 2354 2355 /* 2356 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset 2357 * in that RAMBlock. 2358 * 2359 * ptr: Host pointer to look up 2360 * round_offset: If true round the result offset down to a page boundary 2361 * *ram_addr: set to result ram_addr 2362 * *offset: set to result offset within the RAMBlock 2363 * 2364 * Returns: RAMBlock (or NULL if not found) 2365 * 2366 * By the time this function returns, the returned pointer is not protected 2367 * by RCU anymore. If the caller is not within an RCU critical section and 2368 * does not hold the iothread lock, it must have other means of protecting the 2369 * pointer, such as a reference to the region that includes the incoming 2370 * ram_addr_t. 2371 */ 2372 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, 2373 ram_addr_t *offset) 2374 { 2375 RAMBlock *block; 2376 uint8_t *host = ptr; 2377 2378 if (xen_enabled()) { 2379 ram_addr_t ram_addr; 2380 RCU_READ_LOCK_GUARD(); 2381 ram_addr = xen_ram_addr_from_mapcache(ptr); 2382 block = qemu_get_ram_block(ram_addr); 2383 if (block) { 2384 *offset = ram_addr - block->offset; 2385 } 2386 return block; 2387 } 2388 2389 RCU_READ_LOCK_GUARD(); 2390 block = qatomic_rcu_read(&ram_list.mru_block); 2391 if (block && block->host && host - block->host < block->max_length) { 2392 goto found; 2393 } 2394 2395 RAMBLOCK_FOREACH(block) { 2396 /* This case append when the block is not mapped. */ 2397 if (block->host == NULL) { 2398 continue; 2399 } 2400 if (host - block->host < block->max_length) { 2401 goto found; 2402 } 2403 } 2404 2405 return NULL; 2406 2407 found: 2408 *offset = (host - block->host); 2409 if (round_offset) { 2410 *offset &= TARGET_PAGE_MASK; 2411 } 2412 return block; 2413 } 2414 2415 /* 2416 * Finds the named RAMBlock 2417 * 2418 * name: The name of RAMBlock to find 2419 * 2420 * Returns: RAMBlock (or NULL if not found) 2421 */ 2422 RAMBlock *qemu_ram_block_by_name(const char *name) 2423 { 2424 RAMBlock *block; 2425 2426 RAMBLOCK_FOREACH(block) { 2427 if (!strcmp(name, block->idstr)) { 2428 return block; 2429 } 2430 } 2431 2432 return NULL; 2433 } 2434 2435 /* Some of the softmmu routines need to translate from a host pointer 2436 (typically a TLB entry) back to a ram offset. 
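
   A hedged sketch of the expected calling pattern (the variable names are
   illustrative, not taken from this file):

       ram_addr_t ram_addr = qemu_ram_addr_from_host(host_ptr);
       if (ram_addr == RAM_ADDR_INVALID) {
           ... host_ptr does not point into any registered RAMBlock ...
       }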
*/ 2437 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2438 { 2439 RAMBlock *block; 2440 ram_addr_t offset; 2441 2442 block = qemu_ram_block_from_host(ptr, false, &offset); 2443 if (!block) { 2444 return RAM_ADDR_INVALID; 2445 } 2446 2447 return block->offset + offset; 2448 } 2449 2450 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2451 MemTxAttrs attrs, void *buf, hwaddr len); 2452 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2453 const void *buf, hwaddr len); 2454 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2455 bool is_write, MemTxAttrs attrs); 2456 2457 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2458 unsigned len, MemTxAttrs attrs) 2459 { 2460 subpage_t *subpage = opaque; 2461 uint8_t buf[8]; 2462 MemTxResult res; 2463 2464 #if defined(DEBUG_SUBPAGE) 2465 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, 2466 subpage, len, addr); 2467 #endif 2468 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2469 if (res) { 2470 return res; 2471 } 2472 *data = ldn_p(buf, len); 2473 return MEMTX_OK; 2474 } 2475 2476 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2477 uint64_t value, unsigned len, MemTxAttrs attrs) 2478 { 2479 subpage_t *subpage = opaque; 2480 uint8_t buf[8]; 2481 2482 #if defined(DEBUG_SUBPAGE) 2483 printf("%s: subpage %p len %u addr " TARGET_FMT_plx 2484 " value %"PRIx64"\n", 2485 __func__, subpage, len, addr, value); 2486 #endif 2487 stn_p(buf, len, value); 2488 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2489 } 2490 2491 static bool subpage_accepts(void *opaque, hwaddr addr, 2492 unsigned len, bool is_write, 2493 MemTxAttrs attrs) 2494 { 2495 subpage_t *subpage = opaque; 2496 #if defined(DEBUG_SUBPAGE) 2497 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", 2498 __func__, subpage, is_write ? 
'w' : 'r', len, addr); 2499 #endif 2500 2501 return flatview_access_valid(subpage->fv, addr + subpage->base, 2502 len, is_write, attrs); 2503 } 2504 2505 static const MemoryRegionOps subpage_ops = { 2506 .read_with_attrs = subpage_read, 2507 .write_with_attrs = subpage_write, 2508 .impl.min_access_size = 1, 2509 .impl.max_access_size = 8, 2510 .valid.min_access_size = 1, 2511 .valid.max_access_size = 8, 2512 .valid.accepts = subpage_accepts, 2513 .endianness = DEVICE_NATIVE_ENDIAN, 2514 }; 2515 2516 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 2517 uint16_t section) 2518 { 2519 int idx, eidx; 2520 2521 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2522 return -1; 2523 idx = SUBPAGE_IDX(start); 2524 eidx = SUBPAGE_IDX(end); 2525 #if defined(DEBUG_SUBPAGE) 2526 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2527 __func__, mmio, start, end, idx, eidx, section); 2528 #endif 2529 for (; idx <= eidx; idx++) { 2530 mmio->sub_section[idx] = section; 2531 } 2532 2533 return 0; 2534 } 2535 2536 static subpage_t *subpage_init(FlatView *fv, hwaddr base) 2537 { 2538 subpage_t *mmio; 2539 2540 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ 2541 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); 2542 mmio->fv = fv; 2543 mmio->base = base; 2544 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2545 NULL, TARGET_PAGE_SIZE); 2546 mmio->iomem.subpage = true; 2547 #if defined(DEBUG_SUBPAGE) 2548 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, 2549 mmio, base, TARGET_PAGE_SIZE); 2550 #endif 2551 2552 return mmio; 2553 } 2554 2555 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) 2556 { 2557 assert(fv); 2558 MemoryRegionSection section = { 2559 .fv = fv, 2560 .mr = mr, 2561 .offset_within_address_space = 0, 2562 .offset_within_region = 0, 2563 .size = int128_2_64(), 2564 }; 2565 2566 return phys_section_add(map, §ion); 2567 } 2568 2569 MemoryRegionSection *iotlb_to_section(CPUState *cpu, 2570 hwaddr index, MemTxAttrs attrs) 2571 { 2572 int asidx = cpu_asidx_from_attrs(cpu, attrs); 2573 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 2574 AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch); 2575 MemoryRegionSection *sections = d->map.sections; 2576 2577 return §ions[index & ~TARGET_PAGE_MASK]; 2578 } 2579 2580 static void io_mem_init(void) 2581 { 2582 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 2583 NULL, UINT64_MAX); 2584 } 2585 2586 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) 2587 { 2588 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 2589 uint16_t n; 2590 2591 n = dummy_section(&d->map, fv, &io_mem_unassigned); 2592 assert(n == PHYS_SECTION_UNASSIGNED); 2593 2594 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 2595 2596 return d; 2597 } 2598 2599 void address_space_dispatch_free(AddressSpaceDispatch *d) 2600 { 2601 phys_sections_free(&d->map); 2602 g_free(d); 2603 } 2604 2605 static void do_nothing(CPUState *cpu, run_on_cpu_data d) 2606 { 2607 } 2608 2609 static void tcg_log_global_after_sync(MemoryListener *listener) 2610 { 2611 CPUAddressSpace *cpuas; 2612 2613 /* Wait for the CPU to end the current TB. 
This avoids the following 2614 * incorrect race: 2615 * 2616 * vCPU migration 2617 * ---------------------- ------------------------- 2618 * TLB check -> slow path 2619 * notdirty_mem_write 2620 * write to RAM 2621 * mark dirty 2622 * clear dirty flag 2623 * TLB check -> fast path 2624 * read memory 2625 * write to RAM 2626 * 2627 * by pushing the migration thread's memory read after the vCPU thread has 2628 * written the memory. 2629 */ 2630 if (replay_mode == REPLAY_MODE_NONE) { 2631 /* 2632 * VGA can make calls to this function while updating the screen. 2633 * In record/replay mode this causes a deadlock, because 2634 * run_on_cpu waits for rr mutex. Therefore no races are possible 2635 * in this case and no need for making run_on_cpu when 2636 * record/replay is enabled. 2637 */ 2638 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2639 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); 2640 } 2641 } 2642 2643 static void tcg_commit(MemoryListener *listener) 2644 { 2645 CPUAddressSpace *cpuas; 2646 AddressSpaceDispatch *d; 2647 2648 assert(tcg_enabled()); 2649 /* since each CPU stores ram addresses in its TLB cache, we must 2650 reset the modified entries */ 2651 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2652 cpu_reloading_memory_map(); 2653 /* The CPU and TLB are protected by the iothread lock. 2654 * We reload the dispatch pointer now because cpu_reloading_memory_map() 2655 * may have split the RCU critical section. 2656 */ 2657 d = address_space_to_dispatch(cpuas->as); 2658 qatomic_rcu_set(&cpuas->memory_dispatch, d); 2659 tlb_flush(cpuas->cpu); 2660 } 2661 2662 static void memory_map_init(void) 2663 { 2664 system_memory = g_malloc(sizeof(*system_memory)); 2665 2666 memory_region_init(system_memory, NULL, "system", UINT64_MAX); 2667 address_space_init(&address_space_memory, system_memory, "memory"); 2668 2669 system_io = g_malloc(sizeof(*system_io)); 2670 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 2671 65536); 2672 address_space_init(&address_space_io, system_io, "I/O"); 2673 } 2674 2675 MemoryRegion *get_system_memory(void) 2676 { 2677 return system_memory; 2678 } 2679 2680 MemoryRegion *get_system_io(void) 2681 { 2682 return system_io; 2683 } 2684 2685 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2686 hwaddr length) 2687 { 2688 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 2689 addr += memory_region_get_ram_addr(mr); 2690 2691 /* No early return if dirty_log_mask is or becomes 0, because 2692 * cpu_physical_memory_set_dirty_range will still call 2693 * xen_modified_memory. 2694 */ 2695 if (dirty_log_mask) { 2696 dirty_log_mask = 2697 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2698 } 2699 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2700 assert(tcg_enabled()); 2701 tb_invalidate_phys_range(addr, addr + length); 2702 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2703 } 2704 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 2705 } 2706 2707 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) 2708 { 2709 /* 2710 * In principle this function would work on other memory region types too, 2711 * but the ROM device use case is the only one where this operation is 2712 * necessary. Other memory regions should use the 2713 * address_space_read/write() APIs. 
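     *
     * Illustrative example (names are hypothetical): a ROMD flash model that
     * has just rewritten part of its backing RAM obtained through
     * memory_region_get_ram_ptr() would call
     *
     *     memory_region_flush_rom_device(rom_mr, patched_ofs, patched_len);
     *
     * so that stale TCG translations are invalidated and dirty logging sees
     * the update.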
2714 */ 2715 assert(memory_region_is_romd(mr)); 2716 2717 invalidate_and_set_dirty(mr, addr, size); 2718 } 2719 2720 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2721 { 2722 unsigned access_size_max = mr->ops->valid.max_access_size; 2723 2724 /* Regions are assumed to support 1-4 byte accesses unless 2725 otherwise specified. */ 2726 if (access_size_max == 0) { 2727 access_size_max = 4; 2728 } 2729 2730 /* Bound the maximum access by the alignment of the address. */ 2731 if (!mr->ops->impl.unaligned) { 2732 unsigned align_size_max = addr & -addr; 2733 if (align_size_max != 0 && align_size_max < access_size_max) { 2734 access_size_max = align_size_max; 2735 } 2736 } 2737 2738 /* Don't attempt accesses larger than the maximum. */ 2739 if (l > access_size_max) { 2740 l = access_size_max; 2741 } 2742 l = pow2floor(l); 2743 2744 return l; 2745 } 2746 2747 static bool prepare_mmio_access(MemoryRegion *mr) 2748 { 2749 bool release_lock = false; 2750 2751 if (!qemu_mutex_iothread_locked()) { 2752 qemu_mutex_lock_iothread(); 2753 release_lock = true; 2754 } 2755 if (mr->flush_coalesced_mmio) { 2756 qemu_flush_coalesced_mmio_buffer(); 2757 } 2758 2759 return release_lock; 2760 } 2761 2762 /* Called within RCU critical section. */ 2763 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2764 MemTxAttrs attrs, 2765 const void *ptr, 2766 hwaddr len, hwaddr addr1, 2767 hwaddr l, MemoryRegion *mr) 2768 { 2769 uint8_t *ram_ptr; 2770 uint64_t val; 2771 MemTxResult result = MEMTX_OK; 2772 bool release_lock = false; 2773 const uint8_t *buf = ptr; 2774 2775 for (;;) { 2776 if (!memory_access_is_direct(mr, true)) { 2777 release_lock |= prepare_mmio_access(mr); 2778 l = memory_access_size(mr, l, addr1); 2779 /* XXX: could force current_cpu to NULL to avoid 2780 potential bugs */ 2781 val = ldn_he_p(buf, l); 2782 result |= memory_region_dispatch_write(mr, addr1, val, 2783 size_memop(l), attrs); 2784 } else { 2785 /* RAM case */ 2786 ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); 2787 memcpy(ram_ptr, buf, l); 2788 invalidate_and_set_dirty(mr, addr1, l); 2789 } 2790 2791 if (release_lock) { 2792 qemu_mutex_unlock_iothread(); 2793 release_lock = false; 2794 } 2795 2796 len -= l; 2797 buf += l; 2798 addr += l; 2799 2800 if (!len) { 2801 break; 2802 } 2803 2804 l = len; 2805 mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); 2806 } 2807 2808 return result; 2809 } 2810 2811 /* Called from RCU critical section. */ 2812 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2813 const void *buf, hwaddr len) 2814 { 2815 hwaddr l; 2816 hwaddr addr1; 2817 MemoryRegion *mr; 2818 MemTxResult result = MEMTX_OK; 2819 2820 l = len; 2821 mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); 2822 result = flatview_write_continue(fv, addr, attrs, buf, len, 2823 addr1, l, mr); 2824 2825 return result; 2826 } 2827 2828 /* Called within RCU critical section. 
*/ 2829 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 2830 MemTxAttrs attrs, void *ptr, 2831 hwaddr len, hwaddr addr1, hwaddr l, 2832 MemoryRegion *mr) 2833 { 2834 uint8_t *ram_ptr; 2835 uint64_t val; 2836 MemTxResult result = MEMTX_OK; 2837 bool release_lock = false; 2838 uint8_t *buf = ptr; 2839 2840 fuzz_dma_read_cb(addr, len, mr); 2841 for (;;) { 2842 if (!memory_access_is_direct(mr, false)) { 2843 /* I/O case */ 2844 release_lock |= prepare_mmio_access(mr); 2845 l = memory_access_size(mr, l, addr1); 2846 result |= memory_region_dispatch_read(mr, addr1, &val, 2847 size_memop(l), attrs); 2848 stn_he_p(buf, l, val); 2849 } else { 2850 /* RAM case */ 2851 ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); 2852 memcpy(buf, ram_ptr, l); 2853 } 2854 2855 if (release_lock) { 2856 qemu_mutex_unlock_iothread(); 2857 release_lock = false; 2858 } 2859 2860 len -= l; 2861 buf += l; 2862 addr += l; 2863 2864 if (!len) { 2865 break; 2866 } 2867 2868 l = len; 2869 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); 2870 } 2871 2872 return result; 2873 } 2874 2875 /* Called from RCU critical section. */ 2876 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2877 MemTxAttrs attrs, void *buf, hwaddr len) 2878 { 2879 hwaddr l; 2880 hwaddr addr1; 2881 MemoryRegion *mr; 2882 2883 l = len; 2884 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); 2885 return flatview_read_continue(fv, addr, attrs, buf, len, 2886 addr1, l, mr); 2887 } 2888 2889 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 2890 MemTxAttrs attrs, void *buf, hwaddr len) 2891 { 2892 MemTxResult result = MEMTX_OK; 2893 FlatView *fv; 2894 2895 if (len > 0) { 2896 RCU_READ_LOCK_GUARD(); 2897 fv = address_space_to_flatview(as); 2898 result = flatview_read(fv, addr, attrs, buf, len); 2899 } 2900 2901 return result; 2902 } 2903 2904 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 2905 MemTxAttrs attrs, 2906 const void *buf, hwaddr len) 2907 { 2908 MemTxResult result = MEMTX_OK; 2909 FlatView *fv; 2910 2911 if (len > 0) { 2912 RCU_READ_LOCK_GUARD(); 2913 fv = address_space_to_flatview(as); 2914 result = flatview_write(fv, addr, attrs, buf, len); 2915 } 2916 2917 return result; 2918 } 2919 2920 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 2921 void *buf, hwaddr len, bool is_write) 2922 { 2923 if (is_write) { 2924 return address_space_write(as, addr, attrs, buf, len); 2925 } else { 2926 return address_space_read_full(as, addr, attrs, buf, len); 2927 } 2928 } 2929 2930 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 2931 uint8_t c, hwaddr len, MemTxAttrs attrs) 2932 { 2933 #define FILLBUF_SIZE 512 2934 uint8_t fillbuf[FILLBUF_SIZE]; 2935 int l; 2936 MemTxResult error = MEMTX_OK; 2937 2938 memset(fillbuf, c, FILLBUF_SIZE); 2939 while (len > 0) { 2940 l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; 2941 error |= address_space_write(as, addr, attrs, fillbuf, l); 2942 len -= l; 2943 addr += l; 2944 } 2945 2946 return error; 2947 } 2948 2949 void cpu_physical_memory_rw(hwaddr addr, void *buf, 2950 hwaddr len, bool is_write) 2951 { 2952 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, 2953 buf, len, is_write); 2954 } 2955 2956 enum write_rom_type { 2957 WRITE_DATA, 2958 FLUSH_CACHE, 2959 }; 2960 2961 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, 2962 hwaddr addr, 2963 MemTxAttrs attrs, 2964 const void *ptr, 2965 hwaddr len, 2966 enum write_rom_type type) 2967 { 2968 hwaddr l; 2969 uint8_t *ram_ptr; 2970 hwaddr addr1; 2971 MemoryRegion *mr; 2972 const uint8_t *buf = ptr; 2973 2974 RCU_READ_LOCK_GUARD(); 2975 while (len > 0) { 2976 l = len; 2977 mr = address_space_translate(as, addr, &addr1, &l, true, attrs); 2978 2979 if (!(memory_region_is_ram(mr) || 2980 memory_region_is_romd(mr))) { 2981 l = memory_access_size(mr, l, addr1); 2982 } else { 2983 /* ROM/RAM case */ 2984 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 2985 switch (type) { 2986 case WRITE_DATA: 2987 memcpy(ram_ptr, buf, l); 2988 invalidate_and_set_dirty(mr, addr1, l); 2989 break; 2990 case FLUSH_CACHE: 2991 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); 2992 break; 2993 } 2994 } 2995 len -= l; 2996 buf += l; 2997 addr += l; 2998 } 2999 return MEMTX_OK; 3000 } 3001 3002 /* used for ROM loading : can write in RAM and ROM */ 3003 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 3004 MemTxAttrs attrs, 3005 const void *buf, hwaddr len) 3006 { 3007 return address_space_write_rom_internal(as, addr, attrs, 3008 buf, len, WRITE_DATA); 3009 } 3010 3011 void cpu_flush_icache_range(hwaddr start, hwaddr len) 3012 { 3013 /* 3014 * This function should do the same thing as an icache flush that was 3015 * triggered from within the guest. For TCG we are always cache coherent, 3016 * so there is no need to flush anything. For KVM / Xen we need to flush 3017 * the host's instruction cache at least. 
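     *
     * Illustrative call site (placeholder names): after DMA-ing executable
     * code into guest RAM, e.g. when a loader writes a boot image, flush the
     * written physical range so the accelerator's instruction cache stays
     * coherent:
     *
     *     cpu_flush_icache_range(load_addr, load_len);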
3018 */ 3019 if (tcg_enabled()) { 3020 return; 3021 } 3022 3023 address_space_write_rom_internal(&address_space_memory, 3024 start, MEMTXATTRS_UNSPECIFIED, 3025 NULL, len, FLUSH_CACHE); 3026 } 3027 3028 typedef struct { 3029 MemoryRegion *mr; 3030 void *buffer; 3031 hwaddr addr; 3032 hwaddr len; 3033 bool in_use; 3034 } BounceBuffer; 3035 3036 static BounceBuffer bounce; 3037 3038 typedef struct MapClient { 3039 QEMUBH *bh; 3040 QLIST_ENTRY(MapClient) link; 3041 } MapClient; 3042 3043 QemuMutex map_client_list_lock; 3044 static QLIST_HEAD(, MapClient) map_client_list 3045 = QLIST_HEAD_INITIALIZER(map_client_list); 3046 3047 static void cpu_unregister_map_client_do(MapClient *client) 3048 { 3049 QLIST_REMOVE(client, link); 3050 g_free(client); 3051 } 3052 3053 static void cpu_notify_map_clients_locked(void) 3054 { 3055 MapClient *client; 3056 3057 while (!QLIST_EMPTY(&map_client_list)) { 3058 client = QLIST_FIRST(&map_client_list); 3059 qemu_bh_schedule(client->bh); 3060 cpu_unregister_map_client_do(client); 3061 } 3062 } 3063 3064 void cpu_register_map_client(QEMUBH *bh) 3065 { 3066 MapClient *client = g_malloc(sizeof(*client)); 3067 3068 qemu_mutex_lock(&map_client_list_lock); 3069 client->bh = bh; 3070 QLIST_INSERT_HEAD(&map_client_list, client, link); 3071 if (!qatomic_read(&bounce.in_use)) { 3072 cpu_notify_map_clients_locked(); 3073 } 3074 qemu_mutex_unlock(&map_client_list_lock); 3075 } 3076 3077 void cpu_exec_init_all(void) 3078 { 3079 qemu_mutex_init(&ram_list.mutex); 3080 /* The data structures we set up here depend on knowing the page size, 3081 * so no more changes can be made after this point. 3082 * In an ideal world, nothing we did before we had finished the 3083 * machine setup would care about the target page size, and we could 3084 * do this much later, rather than requiring board models to state 3085 * up front what their requirements are. 
3086 */ 3087 finalize_target_page_bits(); 3088 io_mem_init(); 3089 memory_map_init(); 3090 qemu_mutex_init(&map_client_list_lock); 3091 } 3092 3093 void cpu_unregister_map_client(QEMUBH *bh) 3094 { 3095 MapClient *client; 3096 3097 qemu_mutex_lock(&map_client_list_lock); 3098 QLIST_FOREACH(client, &map_client_list, link) { 3099 if (client->bh == bh) { 3100 cpu_unregister_map_client_do(client); 3101 break; 3102 } 3103 } 3104 qemu_mutex_unlock(&map_client_list_lock); 3105 } 3106 3107 static void cpu_notify_map_clients(void) 3108 { 3109 qemu_mutex_lock(&map_client_list_lock); 3110 cpu_notify_map_clients_locked(); 3111 qemu_mutex_unlock(&map_client_list_lock); 3112 } 3113 3114 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3115 bool is_write, MemTxAttrs attrs) 3116 { 3117 MemoryRegion *mr; 3118 hwaddr l, xlat; 3119 3120 while (len > 0) { 3121 l = len; 3122 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3123 if (!memory_access_is_direct(mr, is_write)) { 3124 l = memory_access_size(mr, l, addr); 3125 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3126 return false; 3127 } 3128 } 3129 3130 len -= l; 3131 addr += l; 3132 } 3133 return true; 3134 } 3135 3136 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3137 hwaddr len, bool is_write, 3138 MemTxAttrs attrs) 3139 { 3140 FlatView *fv; 3141 bool result; 3142 3143 RCU_READ_LOCK_GUARD(); 3144 fv = address_space_to_flatview(as); 3145 result = flatview_access_valid(fv, addr, len, is_write, attrs); 3146 return result; 3147 } 3148 3149 static hwaddr 3150 flatview_extend_translation(FlatView *fv, hwaddr addr, 3151 hwaddr target_len, 3152 MemoryRegion *mr, hwaddr base, hwaddr len, 3153 bool is_write, MemTxAttrs attrs) 3154 { 3155 hwaddr done = 0; 3156 hwaddr xlat; 3157 MemoryRegion *this_mr; 3158 3159 for (;;) { 3160 target_len -= len; 3161 addr += len; 3162 done += len; 3163 if (target_len == 0) { 3164 return done; 3165 } 3166 3167 len = target_len; 3168 this_mr = flatview_translate(fv, addr, &xlat, 3169 &len, is_write, attrs); 3170 if (this_mr != mr || xlat != base + done) { 3171 return done; 3172 } 3173 } 3174 } 3175 3176 /* Map a physical memory region into a host virtual address. 3177 * May map a subset of the requested range, given by and returned in *plen. 3178 * May return NULL if resources needed to perform the mapping are exhausted. 3179 * Use only for reads OR writes - not for read-modify-write operations. 3180 * Use cpu_register_map_client() to know when retrying the map operation is 3181 * likely to succeed. 
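 *
 * Sketch of the usual map/access/unmap cycle (illustrative; error handling
 * trimmed and "dma_addr"/"dma_len" are placeholder names):
 *
 *     hwaddr mapped_len = dma_len;
 *     void *p = address_space_map(as, dma_addr, &mapped_len, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         ... write up to mapped_len bytes at p ...
 *         address_space_unmap(as, p, mapped_len, true, mapped_len);
 *     } else {
 *         ... register a bottom half with cpu_register_map_client() and
 *         retry from there ...
 *     }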
3182 */ 3183 void *address_space_map(AddressSpace *as, 3184 hwaddr addr, 3185 hwaddr *plen, 3186 bool is_write, 3187 MemTxAttrs attrs) 3188 { 3189 hwaddr len = *plen; 3190 hwaddr l, xlat; 3191 MemoryRegion *mr; 3192 void *ptr; 3193 FlatView *fv; 3194 3195 if (len == 0) { 3196 return NULL; 3197 } 3198 3199 l = len; 3200 RCU_READ_LOCK_GUARD(); 3201 fv = address_space_to_flatview(as); 3202 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3203 3204 if (!memory_access_is_direct(mr, is_write)) { 3205 if (qatomic_xchg(&bounce.in_use, true)) { 3206 *plen = 0; 3207 return NULL; 3208 } 3209 /* Avoid unbounded allocations */ 3210 l = MIN(l, TARGET_PAGE_SIZE); 3211 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); 3212 bounce.addr = addr; 3213 bounce.len = l; 3214 3215 memory_region_ref(mr); 3216 bounce.mr = mr; 3217 if (!is_write) { 3218 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, 3219 bounce.buffer, l); 3220 } 3221 3222 *plen = l; 3223 return bounce.buffer; 3224 } 3225 3226 3227 memory_region_ref(mr); 3228 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3229 l, is_write, attrs); 3230 fuzz_dma_read_cb(addr, *plen, mr); 3231 ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true); 3232 3233 return ptr; 3234 } 3235 3236 /* Unmaps a memory region previously mapped by address_space_map(). 3237 * Will also mark the memory as dirty if is_write is true. access_len gives 3238 * the amount of memory that was actually read or written by the caller. 3239 */ 3240 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3241 bool is_write, hwaddr access_len) 3242 { 3243 if (buffer != bounce.buffer) { 3244 MemoryRegion *mr; 3245 ram_addr_t addr1; 3246 3247 mr = memory_region_from_host(buffer, &addr1); 3248 assert(mr != NULL); 3249 if (is_write) { 3250 invalidate_and_set_dirty(mr, addr1, access_len); 3251 } 3252 if (xen_enabled()) { 3253 xen_invalidate_map_cache_entry(buffer); 3254 } 3255 memory_region_unref(mr); 3256 return; 3257 } 3258 if (is_write) { 3259 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, 3260 bounce.buffer, access_len); 3261 } 3262 qemu_vfree(bounce.buffer); 3263 bounce.buffer = NULL; 3264 memory_region_unref(bounce.mr); 3265 qatomic_mb_set(&bounce.in_use, false); 3266 cpu_notify_map_clients(); 3267 } 3268 3269 void *cpu_physical_memory_map(hwaddr addr, 3270 hwaddr *plen, 3271 bool is_write) 3272 { 3273 return address_space_map(&address_space_memory, addr, plen, is_write, 3274 MEMTXATTRS_UNSPECIFIED); 3275 } 3276 3277 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3278 bool is_write, hwaddr access_len) 3279 { 3280 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3281 } 3282 3283 #define ARG1_DECL AddressSpace *as 3284 #define ARG1 as 3285 #define SUFFIX 3286 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3287 #define RCU_READ_LOCK(...) rcu_read_lock() 3288 #define RCU_READ_UNLOCK(...) 
rcu_read_unlock() 3289 #include "memory_ldst.c.inc" 3290 3291 int64_t address_space_cache_init(MemoryRegionCache *cache, 3292 AddressSpace *as, 3293 hwaddr addr, 3294 hwaddr len, 3295 bool is_write) 3296 { 3297 AddressSpaceDispatch *d; 3298 hwaddr l; 3299 MemoryRegion *mr; 3300 Int128 diff; 3301 3302 assert(len > 0); 3303 3304 l = len; 3305 cache->fv = address_space_get_flatview(as); 3306 d = flatview_to_dispatch(cache->fv); 3307 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3308 3309 /* 3310 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3311 * Take that into account to compute how many bytes are there between 3312 * cache->xlat and the end of the section. 3313 */ 3314 diff = int128_sub(cache->mrs.size, 3315 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3316 l = int128_get64(int128_min(diff, int128_make64(l))); 3317 3318 mr = cache->mrs.mr; 3319 memory_region_ref(mr); 3320 if (memory_access_is_direct(mr, is_write)) { 3321 /* We don't care about the memory attributes here as we're only 3322 * doing this if we found actual RAM, which behaves the same 3323 * regardless of attributes; so UNSPECIFIED is fine. 3324 */ 3325 l = flatview_extend_translation(cache->fv, addr, len, mr, 3326 cache->xlat, l, is_write, 3327 MEMTXATTRS_UNSPECIFIED); 3328 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true); 3329 } else { 3330 cache->ptr = NULL; 3331 } 3332 3333 cache->len = l; 3334 cache->is_write = is_write; 3335 return l; 3336 } 3337 3338 void address_space_cache_invalidate(MemoryRegionCache *cache, 3339 hwaddr addr, 3340 hwaddr access_len) 3341 { 3342 assert(cache->is_write); 3343 if (likely(cache->ptr)) { 3344 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3345 } 3346 } 3347 3348 void address_space_cache_destroy(MemoryRegionCache *cache) 3349 { 3350 if (!cache->mrs.mr) { 3351 return; 3352 } 3353 3354 if (xen_enabled()) { 3355 xen_invalidate_map_cache_entry(cache->ptr); 3356 } 3357 memory_region_unref(cache->mrs.mr); 3358 flatview_unref(cache->fv); 3359 cache->mrs.mr = NULL; 3360 cache->fv = NULL; 3361 } 3362 3363 /* Called from RCU critical section. This function has the same 3364 * semantics as address_space_translate, but it only works on a 3365 * predefined range of a MemoryRegion that was mapped with 3366 * address_space_cache_init. 3367 */ 3368 static inline MemoryRegion *address_space_translate_cached( 3369 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3370 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3371 { 3372 MemoryRegionSection section; 3373 MemoryRegion *mr; 3374 IOMMUMemoryRegion *iommu_mr; 3375 AddressSpace *target_as; 3376 3377 assert(!cache->ptr); 3378 *xlat = addr + cache->xlat; 3379 3380 mr = cache->mrs.mr; 3381 iommu_mr = memory_region_get_iommu(mr); 3382 if (!iommu_mr) { 3383 /* MMIO region. */ 3384 return mr; 3385 } 3386 3387 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3388 NULL, is_write, true, 3389 &target_as, attrs); 3390 return section.mr; 3391 } 3392 3393 /* Called from RCU critical section. address_space_read_cached uses this 3394 * out of line function when the target is an MMIO or IOMMU region. 
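 *
 * For context, the overall cached-access pattern looks roughly like this
 * (illustrative sketch; "base", "region_len" and the read loop are
 * placeholders):
 *
 *     MemoryRegionCache cache;
 *     int64_t cached_len;
 *
 *     cached_len = address_space_cache_init(&cache, as, base, region_len, false);
 *     ... issue reads within [0, cached_len) via address_space_read_cached() ...
 *     address_space_cache_destroy(&cache);
 *
 * The inline address_space_read_cached() wrapper uses the direct cache->ptr
 * mapping when the cached region is RAM and only falls back to this slow
 * path for MMIO or IOMMU targets.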
3395 */ 3396 MemTxResult 3397 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3398 void *buf, hwaddr len) 3399 { 3400 hwaddr addr1, l; 3401 MemoryRegion *mr; 3402 3403 l = len; 3404 mr = address_space_translate_cached(cache, addr, &addr1, &l, false, 3405 MEMTXATTRS_UNSPECIFIED); 3406 return flatview_read_continue(cache->fv, 3407 addr, MEMTXATTRS_UNSPECIFIED, buf, len, 3408 addr1, l, mr); 3409 } 3410 3411 /* Called from RCU critical section. address_space_write_cached uses this 3412 * out of line function when the target is an MMIO or IOMMU region. 3413 */ 3414 MemTxResult 3415 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3416 const void *buf, hwaddr len) 3417 { 3418 hwaddr addr1, l; 3419 MemoryRegion *mr; 3420 3421 l = len; 3422 mr = address_space_translate_cached(cache, addr, &addr1, &l, true, 3423 MEMTXATTRS_UNSPECIFIED); 3424 return flatview_write_continue(cache->fv, 3425 addr, MEMTXATTRS_UNSPECIFIED, buf, len, 3426 addr1, l, mr); 3427 } 3428 3429 #define ARG1_DECL MemoryRegionCache *cache 3430 #define ARG1 cache 3431 #define SUFFIX _cached_slow 3432 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) 3433 #define RCU_READ_LOCK() ((void)0) 3434 #define RCU_READ_UNLOCK() ((void)0) 3435 #include "memory_ldst.c.inc" 3436 3437 /* virtual memory access for debug (includes writing to ROM) */ 3438 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, 3439 void *ptr, target_ulong len, bool is_write) 3440 { 3441 hwaddr phys_addr; 3442 target_ulong l, page; 3443 uint8_t *buf = ptr; 3444 3445 cpu_synchronize_state(cpu); 3446 while (len > 0) { 3447 int asidx; 3448 MemTxAttrs attrs; 3449 MemTxResult res; 3450 3451 page = addr & TARGET_PAGE_MASK; 3452 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); 3453 asidx = cpu_asidx_from_attrs(cpu, attrs); 3454 /* if no physical page mapped, return an error */ 3455 if (phys_addr == -1) 3456 return -1; 3457 l = (page + TARGET_PAGE_SIZE) - addr; 3458 if (l > len) 3459 l = len; 3460 phys_addr += (addr & ~TARGET_PAGE_MASK); 3461 if (is_write) { 3462 res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, 3463 attrs, buf, l); 3464 } else { 3465 res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr, 3466 attrs, buf, l); 3467 } 3468 if (res != MEMTX_OK) { 3469 return -1; 3470 } 3471 len -= l; 3472 buf += l; 3473 addr += l; 3474 } 3475 return 0; 3476 } 3477 3478 /* 3479 * Allows code that needs to deal with migration bitmaps etc to still be built 3480 * target independent. 
3481 */ 3482 size_t qemu_target_page_size(void) 3483 { 3484 return TARGET_PAGE_SIZE; 3485 } 3486 3487 int qemu_target_page_bits(void) 3488 { 3489 return TARGET_PAGE_BITS; 3490 } 3491 3492 int qemu_target_page_bits_min(void) 3493 { 3494 return TARGET_PAGE_BITS_MIN; 3495 } 3496 3497 bool cpu_physical_memory_is_io(hwaddr phys_addr) 3498 { 3499 MemoryRegion*mr; 3500 hwaddr l = 1; 3501 bool res; 3502 3503 RCU_READ_LOCK_GUARD(); 3504 mr = address_space_translate(&address_space_memory, 3505 phys_addr, &phys_addr, &l, false, 3506 MEMTXATTRS_UNSPECIFIED); 3507 3508 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); 3509 return res; 3510 } 3511 3512 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) 3513 { 3514 RAMBlock *block; 3515 int ret = 0; 3516 3517 RCU_READ_LOCK_GUARD(); 3518 RAMBLOCK_FOREACH(block) { 3519 ret = func(block, opaque); 3520 if (ret) { 3521 break; 3522 } 3523 } 3524 return ret; 3525 } 3526 3527 /* 3528 * Unmap pages of memory from start to start+length such that 3529 * they a) read as 0, b) Trigger whatever fault mechanism 3530 * the OS provides for postcopy. 3531 * The pages must be unmapped by the end of the function. 3532 * Returns: 0 on success, none-0 on failure 3533 * 3534 */ 3535 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) 3536 { 3537 int ret = -1; 3538 3539 uint8_t *host_startaddr = rb->host + start; 3540 3541 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { 3542 error_report("ram_block_discard_range: Unaligned start address: %p", 3543 host_startaddr); 3544 goto err; 3545 } 3546 3547 if ((start + length) <= rb->max_length) { 3548 bool need_madvise, need_fallocate; 3549 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { 3550 error_report("ram_block_discard_range: Unaligned length: %zx", 3551 length); 3552 goto err; 3553 } 3554 3555 errno = ENOTSUP; /* If we are missing MADVISE etc */ 3556 3557 /* The logic here is messy; 3558 * madvise DONTNEED fails for hugepages 3559 * fallocate works on hugepages and shmem 3560 * shared anonymous memory requires madvise REMOVE 3561 */ 3562 need_madvise = (rb->page_size == qemu_host_page_size); 3563 need_fallocate = rb->fd != -1; 3564 if (need_fallocate) { 3565 /* For a file, this causes the area of the file to be zero'd 3566 * if read, and for hugetlbfs also causes it to be unmapped 3567 * so a userfault will trigger. 3568 */ 3569 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 3570 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3571 start, length); 3572 if (ret) { 3573 ret = -errno; 3574 error_report("ram_block_discard_range: Failed to fallocate " 3575 "%s:%" PRIx64 " +%zx (%d)", 3576 rb->idstr, start, length, ret); 3577 goto err; 3578 } 3579 #else 3580 ret = -ENOSYS; 3581 error_report("ram_block_discard_range: fallocate not available/file" 3582 "%s:%" PRIx64 " +%zx (%d)", 3583 rb->idstr, start, length, ret); 3584 goto err; 3585 #endif 3586 } 3587 if (need_madvise) { 3588 /* For normal RAM this causes it to be unmapped, 3589 * for shared memory it causes the local mapping to disappear 3590 * and to fall back on the file contents (which we just 3591 * fallocate'd away). 
3592 */ 3593 #if defined(CONFIG_MADVISE) 3594 if (qemu_ram_is_shared(rb) && rb->fd < 0) { 3595 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE); 3596 } else { 3597 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED); 3598 } 3599 if (ret) { 3600 ret = -errno; 3601 error_report("ram_block_discard_range: Failed to discard range " 3602 "%s:%" PRIx64 " +%zx (%d)", 3603 rb->idstr, start, length, ret); 3604 goto err; 3605 } 3606 #else 3607 ret = -ENOSYS; 3608 error_report("ram_block_discard_range: MADVISE not available" 3609 "%s:%" PRIx64 " +%zx (%d)", 3610 rb->idstr, start, length, ret); 3611 goto err; 3612 #endif 3613 } 3614 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, 3615 need_madvise, need_fallocate, ret); 3616 } else { 3617 error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 3618 "/%zx/" RAM_ADDR_FMT")", 3619 rb->idstr, start, length, rb->max_length); 3620 } 3621 3622 err: 3623 return ret; 3624 } 3625 3626 bool ramblock_is_pmem(RAMBlock *rb) 3627 { 3628 return rb->flags & RAM_PMEM; 3629 } 3630 3631 static void mtree_print_phys_entries(int start, int end, int skip, int ptr) 3632 { 3633 if (start == end - 1) { 3634 qemu_printf("\t%3d ", start); 3635 } else { 3636 qemu_printf("\t%3d..%-3d ", start, end - 1); 3637 } 3638 qemu_printf(" skip=%d ", skip); 3639 if (ptr == PHYS_MAP_NODE_NIL) { 3640 qemu_printf(" ptr=NIL"); 3641 } else if (!skip) { 3642 qemu_printf(" ptr=#%d", ptr); 3643 } else { 3644 qemu_printf(" ptr=[%d]", ptr); 3645 } 3646 qemu_printf("\n"); 3647 } 3648 3649 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3650 int128_sub((size), int128_one())) : 0) 3651 3652 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) 3653 { 3654 int i; 3655 3656 qemu_printf(" Dispatch\n"); 3657 qemu_printf(" Physical sections\n"); 3658 3659 for (i = 0; i < d->map.sections_nb; ++i) { 3660 MemoryRegionSection *s = d->map.sections + i; 3661 const char *names[] = { " [unassigned]", " [not dirty]", 3662 " [ROM]", " [watch]" }; 3663 3664 qemu_printf(" #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx 3665 " %s%s%s%s%s", 3666 i, 3667 s->offset_within_address_space, 3668 s->offset_within_address_space + MR_SIZE(s->mr->size), 3669 s->mr->name ? s->mr->name : "(noname)", 3670 i < ARRAY_SIZE(names) ? names[i] : "", 3671 s->mr == root ? " [ROOT]" : "", 3672 s == d->mru_section ? " [MRU]" : "", 3673 s->mr->is_iommu ? " [iommu]" : ""); 3674 3675 if (s->mr->alias) { 3676 qemu_printf(" alias=%s", s->mr->alias->name ? 3677 s->mr->alias->name : "noname"); 3678 } 3679 qemu_printf("\n"); 3680 } 3681 3682 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", 3683 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); 3684 for (i = 0; i < d->map.nodes_nb; ++i) { 3685 int j, jprev; 3686 PhysPageEntry prev; 3687 Node *n = d->map.nodes + i; 3688 3689 qemu_printf(" [%d]\n", i); 3690 3691 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { 3692 PhysPageEntry *pe = *n + j; 3693 3694 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { 3695 continue; 3696 } 3697 3698 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3699 3700 jprev = j; 3701 prev = *pe; 3702 } 3703 3704 if (jprev != ARRAY_SIZE(*n)) { 3705 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3706 } 3707 } 3708 } 3709 3710 /* Require any discards to work. */ 3711 static unsigned int ram_block_discard_required_cnt; 3712 /* Require only coordinated discards to work. 
*/ 3713 static unsigned int ram_block_coordinated_discard_required_cnt; 3714 /* Disable any discards. */ 3715 static unsigned int ram_block_discard_disabled_cnt; 3716 /* Disable only uncoordinated discards. */ 3717 static unsigned int ram_block_uncoordinated_discard_disabled_cnt; 3718 static QemuMutex ram_block_discard_disable_mutex; 3719 3720 static void ram_block_discard_disable_mutex_lock(void) 3721 { 3722 static gsize initialized; 3723 3724 if (g_once_init_enter(&initialized)) { 3725 qemu_mutex_init(&ram_block_discard_disable_mutex); 3726 g_once_init_leave(&initialized, 1); 3727 } 3728 qemu_mutex_lock(&ram_block_discard_disable_mutex); 3729 } 3730 3731 static void ram_block_discard_disable_mutex_unlock(void) 3732 { 3733 qemu_mutex_unlock(&ram_block_discard_disable_mutex); 3734 } 3735 3736 int ram_block_discard_disable(bool state) 3737 { 3738 int ret = 0; 3739 3740 ram_block_discard_disable_mutex_lock(); 3741 if (!state) { 3742 ram_block_discard_disabled_cnt--; 3743 } else if (ram_block_discard_required_cnt || 3744 ram_block_coordinated_discard_required_cnt) { 3745 ret = -EBUSY; 3746 } else { 3747 ram_block_discard_disabled_cnt++; 3748 } 3749 ram_block_discard_disable_mutex_unlock(); 3750 return ret; 3751 } 3752 3753 int ram_block_uncoordinated_discard_disable(bool state) 3754 { 3755 int ret = 0; 3756 3757 ram_block_discard_disable_mutex_lock(); 3758 if (!state) { 3759 ram_block_uncoordinated_discard_disabled_cnt--; 3760 } else if (ram_block_discard_required_cnt) { 3761 ret = -EBUSY; 3762 } else { 3763 ram_block_uncoordinated_discard_disabled_cnt++; 3764 } 3765 ram_block_discard_disable_mutex_unlock(); 3766 return ret; 3767 } 3768 3769 int ram_block_discard_require(bool state) 3770 { 3771 int ret = 0; 3772 3773 ram_block_discard_disable_mutex_lock(); 3774 if (!state) { 3775 ram_block_discard_required_cnt--; 3776 } else if (ram_block_discard_disabled_cnt || 3777 ram_block_uncoordinated_discard_disabled_cnt) { 3778 ret = -EBUSY; 3779 } else { 3780 ram_block_discard_required_cnt++; 3781 } 3782 ram_block_discard_disable_mutex_unlock(); 3783 return ret; 3784 } 3785 3786 int ram_block_coordinated_discard_require(bool state) 3787 { 3788 int ret = 0; 3789 3790 ram_block_discard_disable_mutex_lock(); 3791 if (!state) { 3792 ram_block_coordinated_discard_required_cnt--; 3793 } else if (ram_block_discard_disabled_cnt) { 3794 ret = -EBUSY; 3795 } else { 3796 ram_block_coordinated_discard_required_cnt++; 3797 } 3798 ram_block_discard_disable_mutex_unlock(); 3799 return ret; 3800 } 3801 3802 bool ram_block_discard_is_disabled(void) 3803 { 3804 return qatomic_read(&ram_block_discard_disabled_cnt) || 3805 qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt); 3806 } 3807 3808 bool ram_block_discard_is_required(void) 3809 { 3810 return qatomic_read(&ram_block_discard_required_cnt) || 3811 qatomic_read(&ram_block_coordinated_discard_required_cnt); 3812 } 3813
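
/*
 * Illustrative pairing of the discard require/disable API above.  The caller
 * names are hypothetical and error handling is trimmed:
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_report("some other user relies on RAM block discards");
 *     }
 *     ... operate while discards are guaranteed not to happen ...
 *     ram_block_discard_disable(false);
 *
 * Every successful "true" call bumps a counter and must be balanced by a
 * matching "false" call; conflicting require/disable requests fail with
 * -EBUSY instead of blocking.
 */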