/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
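
/* Illustrative arithmetic (not from the original source): each level of
 * the map resolves P_L2_BITS = 9 address bits, so with ADDR_SPACE_BITS = 64
 * and, for example, TARGET_PAGE_BITS = 12,
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * i.e. a lookup dereferences at most six Node tables before reaching a
 * MemoryRegionSection index.
 */
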
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
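
/* Usage sketch (hypothetical values, mirroring register_multipage() below):
 * registering a 16-page run whose section index in d->map.sections is 5
 * would be
 *
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, 16, 5);
 *
 * phys_page_set_level() then fills whole P_L2_SIZE-aligned runs directly
 * at interior levels (skip = 0, ptr = leaf) and recurses only for the
 * unaligned head and tail of the range.
 */
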
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
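
/* Added commentary (not from the original source): the mru_section cache
 * above short-circuits the common case of repeated accesses hitting the
 * same section, skipping the multi-level phys_page_find() walk. Plain
 * atomic_read()/atomic_set() are enough because callers hold an RCU read
 * lock, so a concurrently cached pointer always refers to a still-live
 * section of this dispatch.
 */
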
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    IOMMUTLBEntry iotlb = {0};
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_lookup_region(d, addr, false);
        addr = addr - section->offset_within_address_space
               + section->offset_within_region;
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (!(iotlb.perm & (1 << is_write))) {
            iotlb.target_as = NULL;
            break;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        as = iotlb.target_as;
    }

    return iotlb;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
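
/* Usage sketch (mirroring callers such as address_space_rw()): the
 * "Called from RCU critical section" contract above looks like
 *
 *     rcu_read_lock();
 *     l = len;
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     ... access at most 'l' bytes of 'mr' starting at offset 'addr1' ...
 *     rcu_read_unlock();
 *
 * which is what makes the atomic_rcu_read() of as->dispatch safe.
 */
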
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
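
/* Usage sketch (hypothetical target code): a CPU model exposing two
 * address spaces would set them up during realize roughly as
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, &address_space_memory, 0);
 *     cpu_address_space_init(cs, secure_as, 1);
 *
 * after which cpu_get_address_space(cs, 1) returns secure_as.
 */
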
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}
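
/* Usage sketch (hypothetical board code, assuming the QOM link API of
 * this era): the "memory" link created above can be re-pointed before
 * the CPU is realized, e.g.
 *
 *     object_property_set_link(OBJECT(cpu), OBJECT(my_container),
 *                              "memory", &error_abort);
 *
 * If no link is set up, the system_memory default assigned above is used.
 */
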
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specified PC.
     */
    tb_flush(cpu);
}

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
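
/* Usage sketch (hypothetical values): a GDB write watchpoint covering an
 * 8-byte variable at 0x2000 would be installed as
 *
 *     cpu_watchpoint_insert(cpu, 0x2000, 8, BP_GDB | BP_MEM_WRITE, NULL);
 *
 * and torn down with the matching
 * cpu_watchpoint_remove(cpu, 0x2000, 8, BP_GDB | BP_MEM_WRITE).
 */
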
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
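
/* Worked example (not from the original source): a watchpoint with
 * vaddr = 0x1000 and len = 8 covers [0x1000, 0x1007]; an access with
 * addr = 0x1004 and len = 16 covers [0x1004, 0x1013]. Neither range
 * starts past the other's end, so the function returns true. Using
 * inclusive end addresses keeps wpend/addrend representable even when
 * a range ends exactly at the top of the address space.
 */
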
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
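
/* Added commentary (assumption about callers, not from this file): the
 * gdbstub is the primary user here, e.g. a GDB "stepi" ends up calling
 * cpu_single_step(cpu, sstep_flags), and the EXCP_DEBUG raised by the
 * CPU loop is reported back to the debugger as a trap.
 */
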
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
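
/* Usage sketch (mirroring target code elsewhere in QEMU): cpu_abort() is
 * the "should never happen" exit, e.g.
 *
 *     cpu_abort(cs, "Unhandled exception 0x%x\n", excp);
 *
 * It does not return: CPU state is dumped to stderr (and to the log when
 * qemu_log_separate() is true) before abort().
 */
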
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
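
/* Illustrative arithmetic (not from the original source): dirty bits for
 * page number P live in blocks->blocks[P / DIRTY_MEMORY_BLOCK_SIZE] at bit
 * P % DIRTY_MEMORY_BLOCK_SIZE. The loop above advances by at most one
 * block per iteration, so each bitmap_test_and_clear_atomic() call stays
 * inside a single allocation.
 */
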
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
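
/* Usage sketch (hypothetical accelerator code):
 *
 *     static void *my_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         ... allocate accelerator-visible memory ...
 *     }
 *
 *     phys_mem_set_alloc(my_ram_alloc);
 *
 * replaces qemu_anon_ram_alloc for subsequent guest RAM allocations.
 */
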
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
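
/* Added commentary (not from the original source): the assertion above is
 * what allows memory_region_section_get_iotlb() to OR a section number
 * into a page-aligned address: section indexes stay below
 * TARGET_PAGE_SIZE, so they never collide with the page-aligned bits.
 */
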

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
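/* Example (hypothetical numbers): with 4 KiB pages, a section covering
 * [0x1800, 0x4400) is carved up by mem_add() as follows:
 *
 *     [0x1800, 0x2000)  partial head page  -> register_subpage()
 *     [0x2000, 0x4000)  whole pages        -> register_multipage()
 *     [0x4000, 0x4400)  partial tail page  -> register_subpage()
 *
 * so only pages shared with other regions pay for the subpage indirection.
 */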

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;
    int64_t file_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    file_size = get_file_size(fd);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    if (file_size > 0 && file_size < memory) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   path, file_size, memory);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (!file_size && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
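/* Usage sketch: file_ram_alloc() is what ultimately backs guest RAM when
 * QEMU is started with file- or hugetlbfs-backed main memory, e.g.
 * (illustrative command line, the path is an example):
 *
 *     qemu-system-x86_64 -m 4096 -mem-path /dev/hugepages ...
 *
 * With a directory argument, a qemu_back_mem.<region>.XXXXXX temp file is
 * created and immediately unlinked; with a plain file path, the file itself
 * becomes the (persistent) backing store.
 */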

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
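/* Usage sketch (hypothetical caller): a device that allocated its block
 * with qemu_ram_alloc_resizeable() can grow it, e.g. on incoming migration:
 *
 *     static void my_block_resized(const char *id, uint64_t len, void *host)
 *     {
 *         // refresh any size or pointer the device derived from the block
 *     }
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, new_len, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * Only used_length moves; max_length, fixed at allocation, bounds the grow.
 */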

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
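/* The free path above is the classic RCU retire pattern: readers walk
 * ram_list.blocks under rcu_read_lock() without taking the mutex, so a
 * removed block may only be reclaimed once every pre-existing reader has
 * drained.  Reader-side sketch of what this protects:
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         if (addr - block->offset < block->max_length) {
 *             // use block->host here, before rcu_read_unlock()
 *             break;
 *         }
 *     }
 *     rcu_read_unlock();
 */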

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
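/* Usage sketch: callers must bracket the lookup in an RCU read section so
 * the block (and its host mapping) cannot be reclaimed underneath them:
 *
 *     rcu_read_lock();
 *     ptr = qemu_map_ram_ptr(NULL, ram_addr);
 *     memcpy(ptr, data, len);                 // hypothetical access
 *     rcu_read_unlock();
 */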

/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}

/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
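/* Example round trip (hypothetical values): for a host pointer p inside a
 * block with offset 0x40000000 and host base B,
 * qemu_ram_block_from_host(p, false, &off) sets off = p - B, and
 * qemu_ram_addr_from_host(p) returns 0x40000000 + (p - B) -- the inverse
 * of qemu_map_ram_ptr(NULL, ram_addr) above.
 */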

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    bool locked = false;

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }

    if (locked) {
        tb_unlock();
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
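/* Flow of the notdirty slow path, step by step: a guest store to a page
 * whose DIRTY_MEMORY_CODE bit is clear is routed here instead of straight
 * to RAM.  The write first invalidates any TBs derived from the page, then
 * lands in RAM via qemu_map_ram_ptr(), then marks the page dirty; once the
 * page is no longer clean, tlb_set_dirty() rewrites the TLB entry so later
 * stores take the fast path and never reach notdirty_mem_write() again.
 */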

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction.
         */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                /* The tb_lock will be reset when cpu_loop_exit or
                 * cpu_loop_exit_noexc longjmp back into the cpu_exec
                 * main loop.
                 */
                tb_lock();
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
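/* Usage sketch: the trap path above is armed by inserting a watchpoint on
 * the CPU, e.g. (as the gdbstub does for a "watch" command):
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *
 * which forces TLB entries covering that page through io_mem_watch, so
 * every load/store lands in watch_mem_read()/watch_mem_write() first.
 */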

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
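/* Example (hypothetical layout): if a 4 KiB page at 0xfe000 holds a device
 * at [0xfe000, 0xfe100) and RAM for the rest, register_subpage() gives the
 * page a single subpage_t whose sub_section[] maps bytes 0x000-0x0ff to the
 * device's section and 0x100-0xfff to the RAM section; subpage_read/write
 * then re-dispatch each access through subpage->as at addr + subpage->base.
 */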
2360aa102231SAvi Kivity { 2361a54c87b6SPeter Maydell int asidx = cpu_asidx_from_attrs(cpu, attrs); 2362a54c87b6SPeter Maydell CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 236332857f4dSPeter Maydell AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch); 236479e2b9aeSPaolo Bonzini MemoryRegionSection *sections = d->map.sections; 23659d82b5a7SPaolo Bonzini 23669d82b5a7SPaolo Bonzini return sections[index & ~TARGET_PAGE_MASK].mr; 2367aa102231SAvi Kivity } 2368aa102231SAvi Kivity 2369e9179ce1SAvi Kivity static void io_mem_init(void) 2370e9179ce1SAvi Kivity { 23711f6245e5SPaolo Bonzini memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); 23722c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 23731f6245e5SPaolo Bonzini NULL, UINT64_MAX); 23742c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, 23751f6245e5SPaolo Bonzini NULL, UINT64_MAX); 23762c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, 23771f6245e5SPaolo Bonzini NULL, UINT64_MAX); 2378e9179ce1SAvi Kivity } 2379e9179ce1SAvi Kivity 2380ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener) 2381ac1970fbSAvi Kivity { 238289ae337aSPaolo Bonzini AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); 238353cb28cbSMarcel Apfelbaum AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 238453cb28cbSMarcel Apfelbaum uint16_t n; 238553cb28cbSMarcel Apfelbaum 2386a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_unassigned); 238753cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_UNASSIGNED); 2388a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_notdirty); 238953cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_NOTDIRTY); 2390a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_rom); 239153cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_ROM); 2392a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_watch); 239353cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_WATCH); 239400752703SPaolo Bonzini 23959736e55bSMichael S. Tsirkin d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 239600752703SPaolo Bonzini d->as = as; 239700752703SPaolo Bonzini as->next_dispatch = d; 239800752703SPaolo Bonzini } 239900752703SPaolo Bonzini 240079e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d) 240179e2b9aeSPaolo Bonzini { 240279e2b9aeSPaolo Bonzini phys_sections_free(&d->map); 240379e2b9aeSPaolo Bonzini g_free(d); 240479e2b9aeSPaolo Bonzini } 240579e2b9aeSPaolo Bonzini 240600752703SPaolo Bonzini static void mem_commit(MemoryListener *listener) 240700752703SPaolo Bonzini { 240800752703SPaolo Bonzini AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); 24090475d94fSPaolo Bonzini AddressSpaceDispatch *cur = as->dispatch; 24100475d94fSPaolo Bonzini AddressSpaceDispatch *next = as->next_dispatch; 2411ac1970fbSAvi Kivity 241253cb28cbSMarcel Apfelbaum phys_page_compact_all(next, next->map.nodes_nb); 2413b35ba30fSMichael S. 
Tsirkin 241479e2b9aeSPaolo Bonzini atomic_rcu_set(&as->dispatch, next); 241553cb28cbSMarcel Apfelbaum if (cur) { 241679e2b9aeSPaolo Bonzini call_rcu(cur, address_space_dispatch_free, rcu); 2417ac1970fbSAvi Kivity } 24189affd6fcSPaolo Bonzini } 24199affd6fcSPaolo Bonzini 24201d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener) 242150c1e149SAvi Kivity { 242232857f4dSPeter Maydell CPUAddressSpace *cpuas; 242332857f4dSPeter Maydell AddressSpaceDispatch *d; 2424117712c3SAvi Kivity 2425117712c3SAvi Kivity /* since each CPU stores ram addresses in its TLB cache, we must 2426117712c3SAvi Kivity reset the modified entries */ 242732857f4dSPeter Maydell cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 242832857f4dSPeter Maydell cpu_reloading_memory_map(); 242932857f4dSPeter Maydell /* The CPU and TLB are protected by the iothread lock. 243032857f4dSPeter Maydell * We reload the dispatch pointer now because cpu_reloading_memory_map() 243132857f4dSPeter Maydell * may have split the RCU critical section. 243232857f4dSPeter Maydell */ 243332857f4dSPeter Maydell d = atomic_rcu_read(&cpuas->as->dispatch); 2434f35e44e7SAlex Bennée atomic_rcu_set(&cpuas->memory_dispatch, d); 2435d10eb08fSAlex Bennée tlb_flush(cpuas->cpu); 243650c1e149SAvi Kivity } 243750c1e149SAvi Kivity 2438ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as) 2439ac1970fbSAvi Kivity { 244000752703SPaolo Bonzini as->dispatch = NULL; 244189ae337aSPaolo Bonzini as->dispatch_listener = (MemoryListener) { 2442ac1970fbSAvi Kivity .begin = mem_begin, 244300752703SPaolo Bonzini .commit = mem_commit, 2444ac1970fbSAvi Kivity .region_add = mem_add, 2445ac1970fbSAvi Kivity .region_nop = mem_add, 2446ac1970fbSAvi Kivity .priority = 0, 2447ac1970fbSAvi Kivity }; 244889ae337aSPaolo Bonzini memory_listener_register(&as->dispatch_listener, as); 2449ac1970fbSAvi Kivity } 2450ac1970fbSAvi Kivity 24516e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as) 24526e48e8f9SPaolo Bonzini { 24536e48e8f9SPaolo Bonzini memory_listener_unregister(&as->dispatch_listener); 24546e48e8f9SPaolo Bonzini } 24556e48e8f9SPaolo Bonzini 245683f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as) 245783f3c251SAvi Kivity { 245883f3c251SAvi Kivity AddressSpaceDispatch *d = as->dispatch; 245983f3c251SAvi Kivity 246079e2b9aeSPaolo Bonzini atomic_rcu_set(&as->dispatch, NULL); 246179e2b9aeSPaolo Bonzini if (d) { 246279e2b9aeSPaolo Bonzini call_rcu(d, address_space_dispatch_free, rcu); 246379e2b9aeSPaolo Bonzini } 246483f3c251SAvi Kivity } 246583f3c251SAvi Kivity 246662152b8aSAvi Kivity static void memory_map_init(void) 246762152b8aSAvi Kivity { 24687267c094SAnthony Liguori system_memory = g_malloc(sizeof(*system_memory)); 246903f49957SPaolo Bonzini 247057271d63SPaolo Bonzini memory_region_init(system_memory, NULL, "system", UINT64_MAX); 24717dca8043SAlexey Kardashevskiy address_space_init(&address_space_memory, system_memory, "memory"); 2472309cb471SAvi Kivity 24737267c094SAnthony Liguori system_io = g_malloc(sizeof(*system_io)); 24743bb28b72SJan Kiszka memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 24753bb28b72SJan Kiszka 65536); 24767dca8043SAlexey Kardashevskiy address_space_init(&address_space_io, system_io, "I/O"); 24772641689aSliguang } 247862152b8aSAvi Kivity 247962152b8aSAvi Kivity MemoryRegion *get_system_memory(void) 248062152b8aSAvi Kivity { 248162152b8aSAvi Kivity return system_memory; 248262152b8aSAvi Kivity } 248362152b8aSAvi Kivity 2484309cb471SAvi Kivity 
MemoryRegion *get_system_io(void) 2485309cb471SAvi Kivity { 2486309cb471SAvi Kivity return system_io; 2487309cb471SAvi Kivity } 2488309cb471SAvi Kivity 2489e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */ 2490e2eef170Spbrook 249113eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */ 249213eb76e0Sbellard #if defined(CONFIG_USER_ONLY) 2493f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, 2494a68fe89cSPaul Brook uint8_t *buf, int len, int is_write) 249513eb76e0Sbellard { 249613eb76e0Sbellard int l, flags; 249713eb76e0Sbellard target_ulong page; 249853a5960aSpbrook void * p; 249913eb76e0Sbellard 250013eb76e0Sbellard while (len > 0) { 250113eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 250213eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 250313eb76e0Sbellard if (l > len) 250413eb76e0Sbellard l = len; 250513eb76e0Sbellard flags = page_get_flags(page); 250613eb76e0Sbellard if (!(flags & PAGE_VALID)) 2507a68fe89cSPaul Brook return -1; 250813eb76e0Sbellard if (is_write) { 250913eb76e0Sbellard if (!(flags & PAGE_WRITE)) 2510a68fe89cSPaul Brook return -1; 2511579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 251272fb7daaSaurel32 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) 2513a68fe89cSPaul Brook return -1; 251472fb7daaSaurel32 memcpy(p, buf, l); 251572fb7daaSaurel32 unlock_user(p, addr, l); 251613eb76e0Sbellard } else { 251713eb76e0Sbellard if (!(flags & PAGE_READ)) 2518a68fe89cSPaul Brook return -1; 2519579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 252072fb7daaSaurel32 if (!(p = lock_user(VERIFY_READ, addr, l, 1))) 2521a68fe89cSPaul Brook return -1; 252272fb7daaSaurel32 memcpy(buf, p, l); 25235b257578Saurel32 unlock_user(p, addr, 0); 252413eb76e0Sbellard } 252513eb76e0Sbellard len -= l; 252613eb76e0Sbellard buf += l; 252713eb76e0Sbellard addr += l; 252813eb76e0Sbellard } 2529a68fe89cSPaul Brook return 0; 253013eb76e0Sbellard } 25318df1cd07Sbellard 253213eb76e0Sbellard #else 253351d7a9ebSAnthony PERARD 2534845b6214SPaolo Bonzini static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2535a8170e5eSAvi Kivity hwaddr length) 253651d7a9ebSAnthony PERARD { 2537845b6214SPaolo Bonzini uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 25380878d0e1SPaolo Bonzini addr += memory_region_get_ram_addr(mr); 25390878d0e1SPaolo Bonzini 2540e87f7778SPaolo Bonzini /* No early return if dirty_log_mask is or becomes 0, because 2541e87f7778SPaolo Bonzini * cpu_physical_memory_set_dirty_range will still call 2542e87f7778SPaolo Bonzini * xen_modified_memory. 
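 * When the written range still covers code pages, the DIRTY_MEMORY_CODE
 * handling below invalidates the affected TBs under tb_lock and then drops
 * that bit from the mask, so the plain bitmap update does not repeat it.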
2543e87f7778SPaolo Bonzini */ 2544e87f7778SPaolo Bonzini if (dirty_log_mask) { 2545e87f7778SPaolo Bonzini dirty_log_mask = 2546e87f7778SPaolo Bonzini cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2547e87f7778SPaolo Bonzini } 2548845b6214SPaolo Bonzini if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2549ba051fb5SAlex Bennée tb_lock(); 255035865339SPaolo Bonzini tb_invalidate_phys_range(addr, addr + length); 2551ba051fb5SAlex Bennée tb_unlock(); 2552845b6214SPaolo Bonzini dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2553845b6214SPaolo Bonzini } 255458d2707eSPaolo Bonzini cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 255549dfcec4SPaolo Bonzini } 255651d7a9ebSAnthony PERARD 255723326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 255882f2563fSPaolo Bonzini { 2559e1622f4bSPaolo Bonzini unsigned access_size_max = mr->ops->valid.max_access_size; 256023326164SRichard Henderson 256123326164SRichard Henderson /* Regions are assumed to support 1-4 byte accesses unless 256223326164SRichard Henderson otherwise specified. */ 256323326164SRichard Henderson if (access_size_max == 0) { 256423326164SRichard Henderson access_size_max = 4; 256582f2563fSPaolo Bonzini } 256623326164SRichard Henderson 256723326164SRichard Henderson /* Bound the maximum access by the alignment of the address. */ 256823326164SRichard Henderson if (!mr->ops->impl.unaligned) { 256923326164SRichard Henderson unsigned align_size_max = addr & -addr; 257023326164SRichard Henderson if (align_size_max != 0 && align_size_max < access_size_max) { 257123326164SRichard Henderson access_size_max = align_size_max; 257223326164SRichard Henderson } 257323326164SRichard Henderson } 257423326164SRichard Henderson 257523326164SRichard Henderson /* Don't attempt accesses larger than the maximum. */ 257623326164SRichard Henderson if (l > access_size_max) { 257723326164SRichard Henderson l = access_size_max; 257823326164SRichard Henderson } 25796554f5c0SPeter Maydell l = pow2floor(l); 258023326164SRichard Henderson 258123326164SRichard Henderson return l; 258282f2563fSPaolo Bonzini } 258382f2563fSPaolo Bonzini 25844840f10eSJan Kiszka static bool prepare_mmio_access(MemoryRegion *mr) 2585125b3806SPaolo Bonzini { 25864840f10eSJan Kiszka bool unlocked = !qemu_mutex_iothread_locked(); 25874840f10eSJan Kiszka bool release_lock = false; 25884840f10eSJan Kiszka 25894840f10eSJan Kiszka if (unlocked && mr->global_locking) { 25904840f10eSJan Kiszka qemu_mutex_lock_iothread(); 25914840f10eSJan Kiszka unlocked = false; 25924840f10eSJan Kiszka release_lock = true; 2593125b3806SPaolo Bonzini } 25944840f10eSJan Kiszka if (mr->flush_coalesced_mmio) { 25954840f10eSJan Kiszka if (unlocked) { 25964840f10eSJan Kiszka qemu_mutex_lock_iothread(); 25974840f10eSJan Kiszka } 25984840f10eSJan Kiszka qemu_flush_coalesced_mmio_buffer(); 25994840f10eSJan Kiszka if (unlocked) { 26004840f10eSJan Kiszka qemu_mutex_unlock_iothread(); 26014840f10eSJan Kiszka } 26024840f10eSJan Kiszka } 26034840f10eSJan Kiszka 26044840f10eSJan Kiszka return release_lock; 2605125b3806SPaolo Bonzini } 2606125b3806SPaolo Bonzini 2607a203ac70SPaolo Bonzini /* Called within RCU critical section. 
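 * The public entry point, address_space_write() below, takes the RCU read
 * lock, translates the first fragment and passes it in as mr/addr1/l; the
 * loop here then re-translates each time the access crosses into the next
 * memory region.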
*/
2608a203ac70SPaolo Bonzini static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2609a203ac70SPaolo Bonzini                                                 MemTxAttrs attrs,
2610a203ac70SPaolo Bonzini                                                 const uint8_t *buf,
2611a203ac70SPaolo Bonzini                                                 int len, hwaddr addr1,
2612a203ac70SPaolo Bonzini                                                 hwaddr l, MemoryRegion *mr)
261313eb76e0Sbellard {
261413eb76e0Sbellard     uint8_t *ptr;
2615791af8c8SPaolo Bonzini     uint64_t val;
26163b643495SPeter Maydell     MemTxResult result = MEMTX_OK;
26174840f10eSJan Kiszka     bool release_lock = false;
261813eb76e0Sbellard
2619a203ac70SPaolo Bonzini     for (;;) {
2620eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, true)) {
26214840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
26225c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
26234917cf44SAndreas Färber             /* XXX: could force current_cpu to NULL to avoid
26246a00d601Sbellard                potential bugs */
262523326164SRichard Henderson             switch (l) {
262623326164SRichard Henderson             case 8:
262723326164SRichard Henderson                 /* 64 bit write access */
262823326164SRichard Henderson                 val = ldq_p(buf);
26293b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 8,
26303b643495SPeter Maydell                                                        attrs);
263123326164SRichard Henderson                 break;
263223326164SRichard Henderson             case 4:
26331c213d19Sbellard                 /* 32 bit write access */
26346da67de6SLadi Prosek                 val = (uint32_t)ldl_p(buf);
26353b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 4,
26363b643495SPeter Maydell                                                        attrs);
263723326164SRichard Henderson                 break;
263823326164SRichard Henderson             case 2:
26391c213d19Sbellard                 /* 16 bit write access */
2640c27004ecSbellard                 val = lduw_p(buf);
26413b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 2,
26423b643495SPeter Maydell                                                        attrs);
264323326164SRichard Henderson                 break;
264423326164SRichard Henderson             case 1:
26451c213d19Sbellard                 /* 8 bit write access */
2646c27004ecSbellard                 val = ldub_p(buf);
26473b643495SPeter Maydell                 result |= memory_region_dispatch_write(mr, addr1, val, 1,
26483b643495SPeter Maydell                                                        attrs);
264923326164SRichard Henderson                 break;
265023326164SRichard Henderson             default:
265123326164SRichard Henderson                 abort();
265213eb76e0Sbellard             }
26532bbfa05dSPaolo Bonzini         } else {
265413eb76e0Sbellard             /* RAM case */
26550878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
265613eb76e0Sbellard             memcpy(ptr, buf, l);
2657845b6214SPaolo Bonzini             invalidate_and_set_dirty(mr, addr1, l);
26583a7d929eSbellard         }
2659eb7eeb88SPaolo Bonzini
2660eb7eeb88SPaolo Bonzini         if (release_lock) {
2661eb7eeb88SPaolo Bonzini             qemu_mutex_unlock_iothread();
2662eb7eeb88SPaolo Bonzini             release_lock = false;
2663eb7eeb88SPaolo Bonzini         }
2664eb7eeb88SPaolo Bonzini
2665eb7eeb88SPaolo Bonzini         len -= l;
2666eb7eeb88SPaolo Bonzini         buf += l;
2667eb7eeb88SPaolo Bonzini         addr += l;
2668a203ac70SPaolo Bonzini
2669a203ac70SPaolo Bonzini         if (!len) {
2670a203ac70SPaolo Bonzini             break;
2671eb7eeb88SPaolo Bonzini         }
2672a203ac70SPaolo Bonzini
2673a203ac70SPaolo Bonzini         l = len;
2674a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2675a203ac70SPaolo Bonzini     }
2676eb7eeb88SPaolo Bonzini
2677eb7eeb88SPaolo Bonzini     return result;
2678eb7eeb88SPaolo Bonzini }
2679eb7eeb88SPaolo Bonzini
2680a203ac70SPaolo Bonzini MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2681a203ac70SPaolo Bonzini                                 const uint8_t *buf, int len)
2682eb7eeb88SPaolo Bonzini {
2683eb7eeb88SPaolo Bonzini     hwaddr l;
2684eb7eeb88SPaolo Bonzini     hwaddr addr1;
2685eb7eeb88SPaolo Bonzini     MemoryRegion *mr;
2686eb7eeb88SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2687a203ac70SPaolo Bonzini
2688a203ac70SPaolo Bonzini     if (len > 0) {
2689a203ac70SPaolo Bonzini         rcu_read_lock();
2690a203ac70SPaolo Bonzini         l = len;
2691a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, true);
2692a203ac70SPaolo Bonzini         result = address_space_write_continue(as, addr, attrs, buf, len,
2693a203ac70SPaolo Bonzini                                               addr1, l, mr);
2694a203ac70SPaolo Bonzini         rcu_read_unlock();
2695a203ac70SPaolo Bonzini     }
2696a203ac70SPaolo Bonzini
2697a203ac70SPaolo Bonzini     return result;
2698a203ac70SPaolo Bonzini }
2699a203ac70SPaolo Bonzini
2700a203ac70SPaolo Bonzini /* Called within RCU critical section. */
2701a203ac70SPaolo Bonzini MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2702a203ac70SPaolo Bonzini                                         MemTxAttrs attrs, uint8_t *buf,
2703a203ac70SPaolo Bonzini                                         int len, hwaddr addr1, hwaddr l,
2704a203ac70SPaolo Bonzini                                         MemoryRegion *mr)
2705a203ac70SPaolo Bonzini {
2706a203ac70SPaolo Bonzini     uint8_t *ptr;
2707a203ac70SPaolo Bonzini     uint64_t val;
2708a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2709eb7eeb88SPaolo Bonzini     bool release_lock = false;
2710eb7eeb88SPaolo Bonzini
2711a203ac70SPaolo Bonzini     for (;;) {
2712eb7eeb88SPaolo Bonzini         if (!memory_access_is_direct(mr, false)) {
271313eb76e0Sbellard             /* I/O case */
27144840f10eSJan Kiszka             release_lock |= prepare_mmio_access(mr);
27155c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr1);
271623326164SRichard Henderson             switch (l) {
271723326164SRichard Henderson             case 8:
271823326164SRichard Henderson                 /* 64 bit read access */
27193b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
27203b643495SPeter Maydell                                                       attrs);
272123326164SRichard Henderson                 stq_p(buf, val);
272223326164SRichard Henderson                 break;
272323326164SRichard Henderson             case 4:
272413eb76e0Sbellard                 /* 32 bit read access */
27253b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
27263b643495SPeter Maydell                                                       attrs);
2727c27004ecSbellard                 stl_p(buf, val);
272823326164SRichard Henderson                 break;
272923326164SRichard Henderson             case 2:
273013eb76e0Sbellard                 /* 16 bit read access */
27313b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
27323b643495SPeter Maydell                                                       attrs);
2733c27004ecSbellard                 stw_p(buf, val);
273423326164SRichard Henderson                 break;
273523326164SRichard Henderson             case 1:
27361c213d19Sbellard                 /* 8 bit read access */
27373b643495SPeter Maydell                 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
27383b643495SPeter Maydell                                                       attrs);
2739c27004ecSbellard                 stb_p(buf, val);
274023326164SRichard Henderson                 break;
274123326164SRichard Henderson             default:
274223326164SRichard Henderson                 abort();
274313eb76e0Sbellard             }
274413eb76e0Sbellard         } else {
274513eb76e0Sbellard             /* RAM case */
27460878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2747f3705d53SAvi Kivity             memcpy(buf, ptr, l);
274813eb76e0Sbellard         }
27494840f10eSJan Kiszka
27504840f10eSJan Kiszka         if (release_lock) {
27514840f10eSJan Kiszka             qemu_mutex_unlock_iothread();
27524840f10eSJan Kiszka             release_lock = false;
27534840f10eSJan Kiszka         }
27544840f10eSJan Kiszka
275513eb76e0Sbellard         len -= l;
275613eb76e0Sbellard         buf += l;
275713eb76e0Sbellard         addr += l;
2758a203ac70SPaolo Bonzini
2759a203ac70SPaolo Bonzini         if (!len) {
2760a203ac70SPaolo Bonzini             break;
276113eb76e0Sbellard         }
2762a203ac70SPaolo Bonzini
2763a203ac70SPaolo Bonzini         l = len;
2764a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2765a203ac70SPaolo Bonzini     }
2766a203ac70SPaolo Bonzini
2767a203ac70SPaolo Bonzini     return result;
2768a203ac70SPaolo Bonzini }
2769a203ac70SPaolo Bonzini
27703cc8f884SPaolo Bonzini MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
27713cc8f884SPaolo Bonzini                                     MemTxAttrs attrs, uint8_t *buf, int len)
2772a203ac70SPaolo Bonzini {
2773a203ac70SPaolo Bonzini     hwaddr l;
2774a203ac70SPaolo Bonzini     hwaddr addr1;
2775a203ac70SPaolo Bonzini     MemoryRegion *mr;
2776a203ac70SPaolo Bonzini     MemTxResult result = MEMTX_OK;
2777a203ac70SPaolo Bonzini
2778a203ac70SPaolo Bonzini     if (len > 0) {
2779a203ac70SPaolo Bonzini         rcu_read_lock();
2780a203ac70SPaolo Bonzini         l = len;
2781a203ac70SPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, false);
2782a203ac70SPaolo Bonzini         result = address_space_read_continue(as, addr, attrs, buf, len,
2783a203ac70SPaolo Bonzini                                              addr1, l, mr);
278441063e1eSPaolo Bonzini         rcu_read_unlock();
2785a203ac70SPaolo Bonzini     }
2786fd8aaa76SPaolo Bonzini
27873b643495SPeter Maydell     return result;
278813eb76e0Sbellard }
27898df1cd07Sbellard
2790eb7eeb88SPaolo Bonzini MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2791eb7eeb88SPaolo Bonzini                              uint8_t *buf, int len, bool is_write)
2792ac1970fbSAvi Kivity {
2793eb7eeb88SPaolo Bonzini     if (is_write) {
2794eb7eeb88SPaolo Bonzini         return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2795eb7eeb88SPaolo Bonzini     } else {
2796eb7eeb88SPaolo Bonzini         return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2797ac1970fbSAvi Kivity     }
2798ac1970fbSAvi Kivity }
2799ac1970fbSAvi Kivity
2800a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2801ac1970fbSAvi Kivity                             int len, int is_write)
2802ac1970fbSAvi Kivity {
28035c9eb028SPeter Maydell     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
28045c9eb028SPeter Maydell                      buf, len, is_write);
2805ac1970fbSAvi Kivity }
2806ac1970fbSAvi Kivity
2807582b55a9SAlexander Graf enum write_rom_type {
2808582b55a9SAlexander Graf     WRITE_DATA,
2809582b55a9SAlexander Graf     FLUSH_CACHE,
2810582b55a9SAlexander Graf };
2811582b55a9SAlexander Graf
28122a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2813582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2814d0ecd2aaSbellard {
2815149f54b5SPaolo Bonzini     hwaddr l;
2816d0ecd2aaSbellard     uint8_t *ptr;
2817149f54b5SPaolo Bonzini     hwaddr addr1;
28185c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2819d0ecd2aaSbellard
282041063e1eSPaolo Bonzini     rcu_read_lock();
2821d0ecd2aaSbellard     while (len > 0) {
2822d0ecd2aaSbellard         l = len;
28232a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2824d0ecd2aaSbellard
28255c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
28265c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2827b242e0e0SPaolo Bonzini             l = memory_access_size(mr, l, addr1);
2828d0ecd2aaSbellard         } else {
2829d0ecd2aaSbellard             /* ROM/RAM case */
28300878d0e1SPaolo Bonzini             ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2831582b55a9SAlexander Graf             switch (type) {
2832582b55a9SAlexander Graf             case WRITE_DATA:
2833d0ecd2aaSbellard                 memcpy(ptr, buf, l);
2834845b6214SPaolo Bonzini                 invalidate_and_set_dirty(mr, addr1, l);
2835582b55a9SAlexander Graf                 break;
2836582b55a9SAlexander Graf             case FLUSH_CACHE:
2837582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2838582b55a9SAlexander Graf                 break;
2839582b55a9SAlexander Graf             }
2840d0ecd2aaSbellard         }
2841d0ecd2aaSbellard         len -= l;
2842d0ecd2aaSbellard         buf += l;
2843d0ecd2aaSbellard         addr += l;
2844d0ecd2aaSbellard     }
284541063e1eSPaolo Bonzini     rcu_read_unlock();
2846d0ecd2aaSbellard }
2847d0ecd2aaSbellard
2848582b55a9SAlexander Graf /* used for ROM loading : can write in RAM and ROM */
28492a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2850582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2851582b55a9SAlexander Graf {
28522a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2853582b55a9SAlexander Graf }
2854582b55a9SAlexander Graf
2855582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2856582b55a9SAlexander Graf {
2857582b55a9SAlexander Graf     /*
2858582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2859582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2860582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2861582b55a9SAlexander Graf      * the host's instruction cache at least.
2862582b55a9SAlexander Graf      */
2863582b55a9SAlexander Graf     if (tcg_enabled()) {
2864582b55a9SAlexander Graf         return;
2865582b55a9SAlexander Graf     }
2866582b55a9SAlexander Graf
28672a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
28682a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2869582b55a9SAlexander Graf }
2870582b55a9SAlexander Graf
28716d16c2f8Saliguori typedef struct {
2872d3e71559SPaolo Bonzini     MemoryRegion *mr;
28736d16c2f8Saliguori     void *buffer;
2874a8170e5eSAvi Kivity     hwaddr addr;
2875a8170e5eSAvi Kivity     hwaddr len;
2876c2cba0ffSFam Zheng     bool in_use;
28776d16c2f8Saliguori } BounceBuffer;
28786d16c2f8Saliguori
28796d16c2f8Saliguori static BounceBuffer bounce;
28806d16c2f8Saliguori
2881ba223c29Saliguori typedef struct MapClient {
2882e95205e1SFam Zheng     QEMUBH *bh;
288372cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2884ba223c29Saliguori } MapClient;
2885ba223c29Saliguori
288638e047b5SFam Zheng QemuMutex map_client_list_lock;
288772cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
288872cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2889ba223c29Saliguori
2890e95205e1SFam Zheng static void cpu_unregister_map_client_do(MapClient *client)
2891ba223c29Saliguori {
289272cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
28937267c094SAnthony Liguori     g_free(client);
2894ba223c29Saliguori }
2895ba223c29Saliguori
289633b6c2edSFam Zheng static void cpu_notify_map_clients_locked(void)
2897ba223c29Saliguori {
2898ba223c29Saliguori     MapClient *client;
2899ba223c29Saliguori
290072cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
290172cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2902e95205e1SFam Zheng         qemu_bh_schedule(client->bh);
2903e95205e1SFam Zheng         cpu_unregister_map_client_do(client);
2904ba223c29Saliguori     }
2905ba223c29Saliguori }
2906ba223c29Saliguori
2907e95205e1SFam Zheng void cpu_register_map_client(QEMUBH *bh)
2908d0ecd2aaSbellard {
2909d0ecd2aaSbellard     MapClient *client = g_malloc(sizeof(*client));
2910d0ecd2aaSbellard
291138e047b5SFam Zheng     qemu_mutex_lock(&map_client_list_lock);
2912e95205e1SFam Zheng     client->bh = bh;
2913d0ecd2aaSbellard     QLIST_INSERT_HEAD(&map_client_list, client, link);
291433b6c2edSFam Zheng     if (!atomic_read(&bounce.in_use)) {
291533b6c2edSFam Zheng         cpu_notify_map_clients_locked();
291633b6c2edSFam Zheng     }
291738e047b5SFam Zheng     qemu_mutex_unlock(&map_client_list_lock);
2918d0ecd2aaSbellard }
2919d0ecd2aaSbellard
292038e047b5SFam Zheng void cpu_exec_init_all(void)
292138e047b5SFam Zheng {
292238e047b5SFam Zheng     qemu_mutex_init(&ram_list.mutex);
292320bccb82SPeter Maydell     /* The data structures we set up here depend on knowing the page size,
292420bccb82SPeter Maydell      * so no more changes can be made after this point.
292520bccb82SPeter Maydell      * In an ideal world, nothing we did before we had finished the
292620bccb82SPeter Maydell      * machine setup would care about the target page size, and we could
292720bccb82SPeter Maydell      * do this much later, rather than requiring board models to state
292820bccb82SPeter Maydell      * up front what their requirements are.
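 * Once finalize_target_page_bits() below has run, TARGET_PAGE_SIZE is
 * frozen, so the setup that follows (for example the subpage section
 * tables, which are sized from TARGET_PAGE_SIZE) can safely depend on it.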
292920bccb82SPeter Maydell */ 293020bccb82SPeter Maydell finalize_target_page_bits(); 293138e047b5SFam Zheng io_mem_init(); 2932680a4783SPaolo Bonzini memory_map_init(); 293338e047b5SFam Zheng qemu_mutex_init(&map_client_list_lock); 293438e047b5SFam Zheng } 293538e047b5SFam Zheng 2936e95205e1SFam Zheng void cpu_unregister_map_client(QEMUBH *bh) 2937d0ecd2aaSbellard { 2938e95205e1SFam Zheng MapClient *client; 2939d0ecd2aaSbellard 2940e95205e1SFam Zheng qemu_mutex_lock(&map_client_list_lock); 2941e95205e1SFam Zheng QLIST_FOREACH(client, &map_client_list, link) { 2942e95205e1SFam Zheng if (client->bh == bh) { 2943e95205e1SFam Zheng cpu_unregister_map_client_do(client); 2944e95205e1SFam Zheng break; 2945e95205e1SFam Zheng } 2946e95205e1SFam Zheng } 2947e95205e1SFam Zheng qemu_mutex_unlock(&map_client_list_lock); 2948d0ecd2aaSbellard } 2949d0ecd2aaSbellard 2950d0ecd2aaSbellard static void cpu_notify_map_clients(void) 2951d0ecd2aaSbellard { 295238e047b5SFam Zheng qemu_mutex_lock(&map_client_list_lock); 295333b6c2edSFam Zheng cpu_notify_map_clients_locked(); 295438e047b5SFam Zheng qemu_mutex_unlock(&map_client_list_lock); 29556d16c2f8Saliguori } 29566d16c2f8Saliguori 295751644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) 295851644ab7SPaolo Bonzini { 29595c8a00ceSPaolo Bonzini MemoryRegion *mr; 296051644ab7SPaolo Bonzini hwaddr l, xlat; 296151644ab7SPaolo Bonzini 296241063e1eSPaolo Bonzini rcu_read_lock(); 296351644ab7SPaolo Bonzini while (len > 0) { 296451644ab7SPaolo Bonzini l = len; 29655c8a00ceSPaolo Bonzini mr = address_space_translate(as, addr, &xlat, &l, is_write); 29665c8a00ceSPaolo Bonzini if (!memory_access_is_direct(mr, is_write)) { 29675c8a00ceSPaolo Bonzini l = memory_access_size(mr, l, addr); 29685c8a00ceSPaolo Bonzini if (!memory_region_access_valid(mr, xlat, l, is_write)) { 29695ad4a2b7SRoman Kapl rcu_read_unlock(); 297051644ab7SPaolo Bonzini return false; 297151644ab7SPaolo Bonzini } 297251644ab7SPaolo Bonzini } 297351644ab7SPaolo Bonzini 297451644ab7SPaolo Bonzini len -= l; 297551644ab7SPaolo Bonzini addr += l; 297651644ab7SPaolo Bonzini } 297741063e1eSPaolo Bonzini rcu_read_unlock(); 297851644ab7SPaolo Bonzini return true; 297951644ab7SPaolo Bonzini } 298051644ab7SPaolo Bonzini 2981715c31ecSPaolo Bonzini static hwaddr 2982715c31ecSPaolo Bonzini address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len, 2983715c31ecSPaolo Bonzini MemoryRegion *mr, hwaddr base, hwaddr len, 2984715c31ecSPaolo Bonzini bool is_write) 2985715c31ecSPaolo Bonzini { 2986715c31ecSPaolo Bonzini hwaddr done = 0; 2987715c31ecSPaolo Bonzini hwaddr xlat; 2988715c31ecSPaolo Bonzini MemoryRegion *this_mr; 2989715c31ecSPaolo Bonzini 2990715c31ecSPaolo Bonzini for (;;) { 2991715c31ecSPaolo Bonzini target_len -= len; 2992715c31ecSPaolo Bonzini addr += len; 2993715c31ecSPaolo Bonzini done += len; 2994715c31ecSPaolo Bonzini if (target_len == 0) { 2995715c31ecSPaolo Bonzini return done; 2996715c31ecSPaolo Bonzini } 2997715c31ecSPaolo Bonzini 2998715c31ecSPaolo Bonzini len = target_len; 2999715c31ecSPaolo Bonzini this_mr = address_space_translate(as, addr, &xlat, &len, is_write); 3000715c31ecSPaolo Bonzini if (this_mr != mr || xlat != base + done) { 3001715c31ecSPaolo Bonzini return done; 3002715c31ecSPaolo Bonzini } 3003715c31ecSPaolo Bonzini } 3004715c31ecSPaolo Bonzini } 3005715c31ecSPaolo Bonzini 30066d16c2f8Saliguori /* Map a physical memory region into a host virtual address. 
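 * The result is either a direct pointer into guest RAM or, for regions
 * that cannot be accessed directly, the single global bounce buffer, of
 * which only one mapping may be outstanding at a time.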
30076d16c2f8Saliguori * May map a subset of the requested range, given by and returned in *plen. 30086d16c2f8Saliguori * May return NULL if resources needed to perform the mapping are exhausted. 30096d16c2f8Saliguori * Use only for reads OR writes - not for read-modify-write operations. 3010ba223c29Saliguori * Use cpu_register_map_client() to know when retrying the map operation is 3011ba223c29Saliguori * likely to succeed. 30126d16c2f8Saliguori */ 3013ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as, 3014a8170e5eSAvi Kivity hwaddr addr, 3015a8170e5eSAvi Kivity hwaddr *plen, 3016ac1970fbSAvi Kivity bool is_write) 30176d16c2f8Saliguori { 3018a8170e5eSAvi Kivity hwaddr len = *plen; 3019715c31ecSPaolo Bonzini hwaddr l, xlat; 3020715c31ecSPaolo Bonzini MemoryRegion *mr; 3021e81bcda5SPaolo Bonzini void *ptr; 30226d16c2f8Saliguori 3023e3127ae0SPaolo Bonzini if (len == 0) { 3024e3127ae0SPaolo Bonzini return NULL; 3025e3127ae0SPaolo Bonzini } 3026e3127ae0SPaolo Bonzini 30276d16c2f8Saliguori l = len; 302841063e1eSPaolo Bonzini rcu_read_lock(); 30295c8a00ceSPaolo Bonzini mr = address_space_translate(as, addr, &xlat, &l, is_write); 303041063e1eSPaolo Bonzini 30315c8a00ceSPaolo Bonzini if (!memory_access_is_direct(mr, is_write)) { 3032c2cba0ffSFam Zheng if (atomic_xchg(&bounce.in_use, true)) { 303341063e1eSPaolo Bonzini rcu_read_unlock(); 3034e3127ae0SPaolo Bonzini return NULL; 30356d16c2f8Saliguori } 3036e85d9db5SKevin Wolf /* Avoid unbounded allocations */ 3037e85d9db5SKevin Wolf l = MIN(l, TARGET_PAGE_SIZE); 3038e85d9db5SKevin Wolf bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); 30396d16c2f8Saliguori bounce.addr = addr; 30406d16c2f8Saliguori bounce.len = l; 3041d3e71559SPaolo Bonzini 3042d3e71559SPaolo Bonzini memory_region_ref(mr); 3043d3e71559SPaolo Bonzini bounce.mr = mr; 30446d16c2f8Saliguori if (!is_write) { 30455c9eb028SPeter Maydell address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, 30465c9eb028SPeter Maydell bounce.buffer, l); 30476d16c2f8Saliguori } 304838bee5dcSStefano Stabellini 304941063e1eSPaolo Bonzini rcu_read_unlock(); 305038bee5dcSStefano Stabellini *plen = l; 305138bee5dcSStefano Stabellini return bounce.buffer; 30526d16c2f8Saliguori } 3053e3127ae0SPaolo Bonzini 30546d16c2f8Saliguori 3055d3e71559SPaolo Bonzini memory_region_ref(mr); 3056715c31ecSPaolo Bonzini *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write); 3057715c31ecSPaolo Bonzini ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen); 3058e81bcda5SPaolo Bonzini rcu_read_unlock(); 3059e81bcda5SPaolo Bonzini 3060e81bcda5SPaolo Bonzini return ptr; 30616d16c2f8Saliguori } 30626d16c2f8Saliguori 3063ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map(). 30646d16c2f8Saliguori * Will also mark the memory as dirty if is_write == 1. access_len gives 30656d16c2f8Saliguori * the amount of memory that was actually read or written by the caller. 
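 * If the buffer was the bounce buffer, the data is written back with
 * address_space_write() when is_write is set, and any waiting map clients
 * are then notified. A typical map/modify/unmap sequence looks like the
 * sketch below; the address space "as", guest address "gpa" and the length
 * are illustrative only, not part of this file:
 *
 *     hwaddr len = 4096;
 *     void *p = address_space_map(as, gpa, &len, true);
 *     if (p) {
 *         memset(p, 0, len);                       // fill the buffer
 *         address_space_unmap(as, p, len, 1, len); // writes back if bounced
 *     }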
30666d16c2f8Saliguori */ 3067a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3068a8170e5eSAvi Kivity int is_write, hwaddr access_len) 30696d16c2f8Saliguori { 30706d16c2f8Saliguori if (buffer != bounce.buffer) { 3071d3e71559SPaolo Bonzini MemoryRegion *mr; 30727443b437SPaolo Bonzini ram_addr_t addr1; 3073d3e71559SPaolo Bonzini 307407bdaa41SPaolo Bonzini mr = memory_region_from_host(buffer, &addr1); 30751b5ec234SPaolo Bonzini assert(mr != NULL); 3076d3e71559SPaolo Bonzini if (is_write) { 3077845b6214SPaolo Bonzini invalidate_and_set_dirty(mr, addr1, access_len); 30786d16c2f8Saliguori } 3079868bb33fSJan Kiszka if (xen_enabled()) { 3080e41d7c69SJan Kiszka xen_invalidate_map_cache_entry(buffer); 3081050a0ddfSAnthony PERARD } 3082d3e71559SPaolo Bonzini memory_region_unref(mr); 30836d16c2f8Saliguori return; 30846d16c2f8Saliguori } 30856d16c2f8Saliguori if (is_write) { 30865c9eb028SPeter Maydell address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, 30875c9eb028SPeter Maydell bounce.buffer, access_len); 30886d16c2f8Saliguori } 3089f8a83245SHerve Poussineau qemu_vfree(bounce.buffer); 30906d16c2f8Saliguori bounce.buffer = NULL; 3091d3e71559SPaolo Bonzini memory_region_unref(bounce.mr); 3092c2cba0ffSFam Zheng atomic_mb_set(&bounce.in_use, false); 3093ba223c29Saliguori cpu_notify_map_clients(); 30946d16c2f8Saliguori } 3095d0ecd2aaSbellard 3096a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr, 3097a8170e5eSAvi Kivity hwaddr *plen, 3098ac1970fbSAvi Kivity int is_write) 3099ac1970fbSAvi Kivity { 3100ac1970fbSAvi Kivity return address_space_map(&address_space_memory, addr, plen, is_write); 3101ac1970fbSAvi Kivity } 3102ac1970fbSAvi Kivity 3103a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3104a8170e5eSAvi Kivity int is_write, hwaddr access_len) 3105ac1970fbSAvi Kivity { 3106ac1970fbSAvi Kivity return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3107ac1970fbSAvi Kivity } 3108ac1970fbSAvi Kivity 31090ce265ffSPaolo Bonzini #define ARG1_DECL AddressSpace *as 31100ce265ffSPaolo Bonzini #define ARG1 as 31110ce265ffSPaolo Bonzini #define SUFFIX 31120ce265ffSPaolo Bonzini #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 31130ce265ffSPaolo Bonzini #define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write) 31140ce265ffSPaolo Bonzini #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs) 31150ce265ffSPaolo Bonzini #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len) 31160ce265ffSPaolo Bonzini #define RCU_READ_LOCK(...) rcu_read_lock() 31170ce265ffSPaolo Bonzini #define RCU_READ_UNLOCK(...) 
rcu_read_unlock() 31180ce265ffSPaolo Bonzini #include "memory_ldst.inc.c" 31191e78bcc1SAlexander Graf 31201f4e496eSPaolo Bonzini int64_t address_space_cache_init(MemoryRegionCache *cache, 31211f4e496eSPaolo Bonzini AddressSpace *as, 31221f4e496eSPaolo Bonzini hwaddr addr, 31231f4e496eSPaolo Bonzini hwaddr len, 31241f4e496eSPaolo Bonzini bool is_write) 31251f4e496eSPaolo Bonzini { 31261f4e496eSPaolo Bonzini hwaddr l, xlat; 31271f4e496eSPaolo Bonzini MemoryRegion *mr; 31281f4e496eSPaolo Bonzini void *ptr; 31291f4e496eSPaolo Bonzini 31301f4e496eSPaolo Bonzini assert(len > 0); 31311f4e496eSPaolo Bonzini 31321f4e496eSPaolo Bonzini l = len; 31331f4e496eSPaolo Bonzini mr = address_space_translate(as, addr, &xlat, &l, is_write); 31341f4e496eSPaolo Bonzini if (!memory_access_is_direct(mr, is_write)) { 31351f4e496eSPaolo Bonzini return -EINVAL; 31361f4e496eSPaolo Bonzini } 31371f4e496eSPaolo Bonzini 31381f4e496eSPaolo Bonzini l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write); 31391f4e496eSPaolo Bonzini ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l); 31401f4e496eSPaolo Bonzini 31411f4e496eSPaolo Bonzini cache->xlat = xlat; 31421f4e496eSPaolo Bonzini cache->is_write = is_write; 31431f4e496eSPaolo Bonzini cache->mr = mr; 31441f4e496eSPaolo Bonzini cache->ptr = ptr; 31451f4e496eSPaolo Bonzini cache->len = l; 31461f4e496eSPaolo Bonzini memory_region_ref(cache->mr); 31471f4e496eSPaolo Bonzini 31481f4e496eSPaolo Bonzini return l; 31491f4e496eSPaolo Bonzini } 31501f4e496eSPaolo Bonzini 31511f4e496eSPaolo Bonzini void address_space_cache_invalidate(MemoryRegionCache *cache, 31521f4e496eSPaolo Bonzini hwaddr addr, 31531f4e496eSPaolo Bonzini hwaddr access_len) 31541f4e496eSPaolo Bonzini { 31551f4e496eSPaolo Bonzini assert(cache->is_write); 31561f4e496eSPaolo Bonzini invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len); 31571f4e496eSPaolo Bonzini } 31581f4e496eSPaolo Bonzini 31591f4e496eSPaolo Bonzini void address_space_cache_destroy(MemoryRegionCache *cache) 31601f4e496eSPaolo Bonzini { 31611f4e496eSPaolo Bonzini if (!cache->mr) { 31621f4e496eSPaolo Bonzini return; 31631f4e496eSPaolo Bonzini } 31641f4e496eSPaolo Bonzini 31651f4e496eSPaolo Bonzini if (xen_enabled()) { 31661f4e496eSPaolo Bonzini xen_invalidate_map_cache_entry(cache->ptr); 31671f4e496eSPaolo Bonzini } 31681f4e496eSPaolo Bonzini memory_region_unref(cache->mr); 31691f4e496eSPaolo Bonzini } 31701f4e496eSPaolo Bonzini 31711f4e496eSPaolo Bonzini /* Called from RCU critical section. This function has the same 31721f4e496eSPaolo Bonzini * semantics as address_space_translate, but it only works on a 31731f4e496eSPaolo Bonzini * predefined range of a MemoryRegion that was mapped with 31741f4e496eSPaolo Bonzini * address_space_cache_init. 31751f4e496eSPaolo Bonzini */ 31761f4e496eSPaolo Bonzini static inline MemoryRegion *address_space_translate_cached( 31771f4e496eSPaolo Bonzini MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 31781f4e496eSPaolo Bonzini hwaddr *plen, bool is_write) 31791f4e496eSPaolo Bonzini { 31801f4e496eSPaolo Bonzini assert(addr < cache->len && *plen <= cache->len - addr); 31811f4e496eSPaolo Bonzini *xlat = addr + cache->xlat; 31821f4e496eSPaolo Bonzini return cache->mr; 31831f4e496eSPaolo Bonzini } 31841f4e496eSPaolo Bonzini 31851f4e496eSPaolo Bonzini #define ARG1_DECL MemoryRegionCache *cache 31861f4e496eSPaolo Bonzini #define ARG1 cache 31871f4e496eSPaolo Bonzini #define SUFFIX _cached 31881f4e496eSPaolo Bonzini #define TRANSLATE(...) 
address_space_translate_cached(cache, __VA_ARGS__) 31891f4e496eSPaolo Bonzini #define IS_DIRECT(mr, is_write) true 31901f4e496eSPaolo Bonzini #define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat)) 31911f4e496eSPaolo Bonzini #define INVALIDATE(mr, ofs, len) ((void)0) 31921f4e496eSPaolo Bonzini #define RCU_READ_LOCK() ((void)0) 31931f4e496eSPaolo Bonzini #define RCU_READ_UNLOCK() ((void)0) 31941f4e496eSPaolo Bonzini #include "memory_ldst.inc.c" 31951f4e496eSPaolo Bonzini 31965e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */ 3197f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, 3198b448f2f3Sbellard uint8_t *buf, int len, int is_write) 319913eb76e0Sbellard { 320013eb76e0Sbellard int l; 3201a8170e5eSAvi Kivity hwaddr phys_addr; 32029b3c35e0Sj_mayer target_ulong page; 320313eb76e0Sbellard 320413eb76e0Sbellard while (len > 0) { 32055232e4c7SPeter Maydell int asidx; 32065232e4c7SPeter Maydell MemTxAttrs attrs; 32075232e4c7SPeter Maydell 320813eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 32095232e4c7SPeter Maydell phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); 32105232e4c7SPeter Maydell asidx = cpu_asidx_from_attrs(cpu, attrs); 321113eb76e0Sbellard /* if no physical page mapped, return an error */ 321213eb76e0Sbellard if (phys_addr == -1) 321313eb76e0Sbellard return -1; 321413eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 321513eb76e0Sbellard if (l > len) 321613eb76e0Sbellard l = len; 32175e2972fdSaliguori phys_addr += (addr & ~TARGET_PAGE_MASK); 32182e38847bSEdgar E. Iglesias if (is_write) { 32195232e4c7SPeter Maydell cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as, 32205232e4c7SPeter Maydell phys_addr, buf, l); 32212e38847bSEdgar E. Iglesias } else { 32225232e4c7SPeter Maydell address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, 32235232e4c7SPeter Maydell MEMTXATTRS_UNSPECIFIED, 32245c9eb028SPeter Maydell buf, l, 0); 32252e38847bSEdgar E. Iglesias } 322613eb76e0Sbellard len -= l; 322713eb76e0Sbellard buf += l; 322813eb76e0Sbellard addr += l; 322913eb76e0Sbellard } 323013eb76e0Sbellard return 0; 323113eb76e0Sbellard } 3232038629a6SDr. David Alan Gilbert 3233038629a6SDr. David Alan Gilbert /* 3234038629a6SDr. David Alan Gilbert * Allows code that needs to deal with migration bitmaps etc to still be built 3235038629a6SDr. David Alan Gilbert * target independent. 3236038629a6SDr. David Alan Gilbert */ 3237038629a6SDr. David Alan Gilbert size_t qemu_target_page_bits(void) 3238038629a6SDr. David Alan Gilbert { 3239038629a6SDr. David Alan Gilbert return TARGET_PAGE_BITS; 3240038629a6SDr. David Alan Gilbert } 3241038629a6SDr. David Alan Gilbert 3242a68fe89cSPaul Brook #endif 324313eb76e0Sbellard 32448e4a424bSBlue Swirl /* 32458e4a424bSBlue Swirl * A helper function for the _utterly broken_ virtio device model to find out if 32468e4a424bSBlue Swirl * it's running on a big endian machine. Don't do this at home kids! 
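 * (TARGET_WORDS_BIGENDIAN is a per-target compile-time define, so the
 * function below folds to a constant for any given target binary.)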
32478e4a424bSBlue Swirl */ 324898ed8ecfSGreg Kurz bool target_words_bigendian(void); 324998ed8ecfSGreg Kurz bool target_words_bigendian(void) 32508e4a424bSBlue Swirl { 32518e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN) 32528e4a424bSBlue Swirl return true; 32538e4a424bSBlue Swirl #else 32548e4a424bSBlue Swirl return false; 32558e4a424bSBlue Swirl #endif 32568e4a424bSBlue Swirl } 32578e4a424bSBlue Swirl 325876f35538SWen Congyang #ifndef CONFIG_USER_ONLY 3259a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr) 326076f35538SWen Congyang { 32615c8a00ceSPaolo Bonzini MemoryRegion*mr; 3262149f54b5SPaolo Bonzini hwaddr l = 1; 326341063e1eSPaolo Bonzini bool res; 326476f35538SWen Congyang 326541063e1eSPaolo Bonzini rcu_read_lock(); 32665c8a00ceSPaolo Bonzini mr = address_space_translate(&address_space_memory, 3267149f54b5SPaolo Bonzini phys_addr, &phys_addr, &l, false); 326876f35538SWen Congyang 326941063e1eSPaolo Bonzini res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); 327041063e1eSPaolo Bonzini rcu_read_unlock(); 327141063e1eSPaolo Bonzini return res; 327276f35538SWen Congyang } 3273bd2fa51fSMichael R. Hines 3274e3807054SDr. David Alan Gilbert int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) 3275bd2fa51fSMichael R. Hines { 3276bd2fa51fSMichael R. Hines RAMBlock *block; 3277e3807054SDr. David Alan Gilbert int ret = 0; 3278bd2fa51fSMichael R. Hines 32790dc3f44aSMike Day rcu_read_lock(); 32800dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 3281e3807054SDr. David Alan Gilbert ret = func(block->idstr, block->host, block->offset, 3282e3807054SDr. David Alan Gilbert block->used_length, opaque); 3283e3807054SDr. David Alan Gilbert if (ret) { 3284e3807054SDr. David Alan Gilbert break; 3285e3807054SDr. David Alan Gilbert } 3286bd2fa51fSMichael R. Hines } 32870dc3f44aSMike Day rcu_read_unlock(); 3288e3807054SDr. David Alan Gilbert return ret; 3289bd2fa51fSMichael R. Hines } 3290ec3f8c99SPeter Maydell #endif 3291
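/* Usage sketch for qemu_ram_foreach_block() above (illustrative only: the
 * callback name and the printf formatting are assumptions, not part of this
 * file). The callback signature mirrors the call made in the loop above, and
 * returning non-zero stops the iteration:
 *
 *     static int dump_block(const char *idstr, void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         printf("%s: host %p offset 0x%" PRIx64 " len 0x%" PRIx64 "\n",
 *                idstr, host, (uint64_t)offset, (uint64_t)length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */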