/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
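
/* Illustrative note: the two bit-fields above pack into 32 bits, so a
 * node entry is a single word.  PHYS_MAP_NODE_NIL is ((uint32_t)~0) >> 6,
 * i.e. 0x3ffffff, the largest value a 26-bit ptr can hold; it is
 * reserved to mean "no node/section here".
 */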

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
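
/* Worked example: with 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6, and each level of the
 * walk consumes P_L2_BITS == 9 bits of the page index:
 *
 *     index = addr >> TARGET_PAGE_BITS;
 *     child = p[(index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
 *
 * phys_page_find() below performs exactly this walk.
 */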

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
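
/* Callers pass a page index plus a page count, all mapped to one
 * section; register_multipage() below, for instance, does
 *
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages,
 *                   section_index);
 *
 * covering [start_addr, start_addr + num_pages * page size).
 */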

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
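
/* Sketch of the effect: a chain of nodes that each have a single
 * populated child, e.g.
 *
 *     root --(skip 1)--> A --(skip 1)--> B --(skip 1)--> leaf
 *
 * collapses to root --(skip 3)--> leaf, since the child's skip is
 * folded into lp->skip, so lookups jump over A and B entirely.
 */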

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
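
/* An illustrative caller loops over the translation, since *plen is
 * clamped to the current section (and to the IOMMU mapping, if any):
 *
 *     while (len > 0) {
 *         l = len;
 *         mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *         ... access l bytes of mr at offset addr1 ...
 *         len -= l;
 *         addr += l;
 *     }
 *
 * address_space_rw() follows this pattern.
 */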

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    rcu_read_lock();
    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    rcu_read_unlock();
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};
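
/* The subsection above only goes on the wire when its .needed callback
 * returns true, i.e. for a TCG guest with a pending exception, so in
 * the common case the stream stays loadable by destinations that
 * predate the subsection.
 */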

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
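
/* Typical (illustrative) use, e.g. from a gdbstub-like client watching
 * four bytes for writes:
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);
 *
 * and later cpu_watchpoint_remove(cpu, addr, 4, BP_GDB | BP_MEM_WRITE).
 */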

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
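
/* Example: a watchpoint at vaddr 0x1000 with len 4 covers bytes
 * [0x1000, 0x1003], so wpend == 0x1003.  A two-byte access at 0x1002
 * has addrend == 0x1003; neither range starts past the other's end,
 * so the access matches.
 */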

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
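
/* The 'client' argument above selects one dirty bitmap per consumer
 * (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE or DIRTY_MEMORY_MIGRATION), so
 * e.g. migration can clear its own view of a page without disturbing
 * VGA dirty tracking.
 */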

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
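
/* A hypothetical accelerator would install its allocator before any
 * RAM block is created (sketch; pool_alloc and its pool are made up):
 *
 *     static void *pool_alloc(size_t size, uint64_t *align)
 *     {
 *         return alloc_from_reserved_pool(size, align);
 *     }
 *
 *     phys_mem_set_alloc(pool_alloc);
 */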

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
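
/* Worked example of the encoding guarded in phys_section_add(): with
 * 4 KiB pages at most 4096 sections fit, so for section number 5 and
 * a page-aligned offset 0x2000 the iotlb entry is 0x2005, and the low
 * bits can always be split back out as the section number.
 */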

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
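
/* Worked example (4 KiB pages): a section spanning [0x1800, 0x4800)
 * is split by the loop above into a head subpage [0x1800, 0x2000),
 * a run of whole pages [0x2000, 0x4000) via register_multipage(),
 * and a tail subpage [0x4000, 0x4800).
 */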

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
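
/* This hugetlbfs path backs guest RAM when the user asks for it, e.g.
 * (illustrative invocation; the mount point is host-specific):
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 */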

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s\n", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif
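
/* Note the size rounding in file_ram_alloc() above: with 2 MiB huge
 * pages, a 3 MiB request becomes (3M + 2M - 1) & ~(2M - 1) == 4M, so
 * the backing file always spans whole huge pages.
 */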
*/
1200d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1201d17b5288SAlex Williamson {
120204b16653SAlex Williamson     RAMBlock *block, *next_block;
12033e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
120404b16653SAlex Williamson 
120549cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out the same offset multiple times */
120649cd9ac6SStefan Hajnoczi 
12070dc3f44aSMike Day     if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
120804b16653SAlex Williamson         return 0;
12090d53d9feSMike Day     }
121004b16653SAlex Williamson 
12110dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1212f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
121304b16653SAlex Williamson 
121462be4e3aSMichael S. Tsirkin         end = block->offset + block->max_length;
121504b16653SAlex Williamson 
12160dc3f44aSMike Day         QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
121704b16653SAlex Williamson             if (next_block->offset >= end) {
121804b16653SAlex Williamson                 next = MIN(next, next_block->offset);
121904b16653SAlex Williamson             }
122004b16653SAlex Williamson         }
122104b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
122204b16653SAlex Williamson             offset = end;
122304b16653SAlex Williamson             mingap = next - end;
122404b16653SAlex Williamson         }
122504b16653SAlex Williamson     }
12263e837b2cSAlex Williamson 
12273e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
12283e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
12293e837b2cSAlex Williamson                 (uint64_t)size);
12303e837b2cSAlex Williamson         abort();
12313e837b2cSAlex Williamson     }
12323e837b2cSAlex Williamson 
123304b16653SAlex Williamson     return offset;
123404b16653SAlex Williamson }
123504b16653SAlex Williamson 
1236652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
123704b16653SAlex Williamson {
1238d17b5288SAlex Williamson     RAMBlock *block;
1239d17b5288SAlex Williamson     ram_addr_t last = 0;
1240d17b5288SAlex Williamson 
12410dc3f44aSMike Day     rcu_read_lock();
12420dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
124362be4e3aSMichael S. Tsirkin         last = MAX(last, block->offset + block->max_length);
12440d53d9feSMike Day     }
12450dc3f44aSMike Day     rcu_read_unlock();
1246d17b5288SAlex Williamson     return last;
1247d17b5288SAlex Williamson }
1248d17b5288SAlex Williamson 
1249ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1250ddb97f1dSJason Baron {
1251ddb97f1dSJason Baron     int ret;
1252ddb97f1dSJason Baron 
1253ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
12542ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
12552ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1256ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1257ddb97f1dSJason Baron         if (ret) {
1258ddb97f1dSJason Baron             perror("qemu_madvise");
1259ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1260ddb97f1dSJason Baron                     "but dump_guest_core=off specified\n");
1261ddb97f1dSJason Baron         }
1262ddb97f1dSJason Baron     }
1263ddb97f1dSJason Baron }
1264ddb97f1dSJason Baron 
12650dc3f44aSMike Day /* Called within an RCU critical section, or while the ramlist lock
12660dc3f44aSMike Day  * is held.
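 * The RAMBlock returned by find_ram_block() below is only guaranteed to
 * stay alive while the caller remains inside its critical section, since
 * blocks are reclaimed through call_rcu() once removed from the list, so
 * the pointer must not be cached beyond it.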
12670dc3f44aSMike Day  */
126820cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
126984b89d78SCam Macdonell {
127020cfe881SHu Tao     RAMBlock *block;
127184b89d78SCam Macdonell 
12720dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1273c5705a77SAvi Kivity         if (block->offset == addr) {
127420cfe881SHu Tao             return block;
1275c5705a77SAvi Kivity         }
1276c5705a77SAvi Kivity     }
127720cfe881SHu Tao 
127820cfe881SHu Tao     return NULL;
127920cfe881SHu Tao }
128020cfe881SHu Tao 
1281ae3a7047SMike Day /* Called with iothread lock held. */
128220cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
128320cfe881SHu Tao {
1284ae3a7047SMike Day     RAMBlock *new_block, *block;
128520cfe881SHu Tao 
12860dc3f44aSMike Day     rcu_read_lock();
1287ae3a7047SMike Day     new_block = find_ram_block(addr);
1288c5705a77SAvi Kivity     assert(new_block);
1289c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
129084b89d78SCam Macdonell 
129109e5ab63SAnthony Liguori     if (dev) {
129209e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
129384b89d78SCam Macdonell         if (id) {
129484b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
12957267c094SAnthony Liguori             g_free(id);
129684b89d78SCam Macdonell         }
129784b89d78SCam Macdonell     }
129884b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
129984b89d78SCam Macdonell 
13000dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1301c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
130284b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
130384b89d78SCam Macdonell                     new_block->idstr);
130484b89d78SCam Macdonell             abort();
130584b89d78SCam Macdonell         }
130684b89d78SCam Macdonell     }
13070dc3f44aSMike Day     rcu_read_unlock();
1308c5705a77SAvi Kivity }
1309c5705a77SAvi Kivity 
1310ae3a7047SMike Day /* Called with iothread lock held. */
131120cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
131220cfe881SHu Tao {
1313ae3a7047SMike Day     RAMBlock *block;
131420cfe881SHu Tao 
1315ae3a7047SMike Day     /* FIXME: arch_init.c assumes that this is not called throughout
1316ae3a7047SMike Day      * migration. Ignore the problem since hot-unplug during migration
1317ae3a7047SMike Day      * does not work anyway.
1318ae3a7047SMike Day      */
1319ae3a7047SMike Day 
13200dc3f44aSMike Day     rcu_read_lock();
1321ae3a7047SMike Day     block = find_ram_block(addr);
132220cfe881SHu Tao     if (block) {
132320cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
132420cfe881SHu Tao     }
13250dc3f44aSMike Day     rcu_read_unlock();
132620cfe881SHu Tao }
132720cfe881SHu Tao 
13288490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
13298490fc78SLuiz Capitulino {
13302ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
13318490fc78SLuiz Capitulino         /* disabled by the user */
13328490fc78SLuiz Capitulino         return 0;
13338490fc78SLuiz Capitulino     }
13348490fc78SLuiz Capitulino 
13358490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
13368490fc78SLuiz Capitulino }
13378490fc78SLuiz Capitulino 
133862be4e3aSMichael S. Tsirkin /* Only legal before the guest might have detected the memory size: e.g. on
133962be4e3aSMichael S. Tsirkin  * incoming migration, or right after reset.
134062be4e3aSMichael S. Tsirkin  *
134162be4e3aSMichael S. Tsirkin  * As memory core doesn't know how memory is accessed, it is up to
134262be4e3aSMichael S.
Tsirkin * resize callback to update device state and/or add assertions to detect 134362be4e3aSMichael S. Tsirkin * misuse, if necessary. 134462be4e3aSMichael S. Tsirkin */ 134562be4e3aSMichael S. Tsirkin int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp) 134662be4e3aSMichael S. Tsirkin { 134762be4e3aSMichael S. Tsirkin RAMBlock *block = find_ram_block(base); 134862be4e3aSMichael S. Tsirkin 134962be4e3aSMichael S. Tsirkin assert(block); 135062be4e3aSMichael S. Tsirkin 1351129ddaf3SMichael S. Tsirkin newsize = TARGET_PAGE_ALIGN(newsize); 1352129ddaf3SMichael S. Tsirkin 135362be4e3aSMichael S. Tsirkin if (block->used_length == newsize) { 135462be4e3aSMichael S. Tsirkin return 0; 135562be4e3aSMichael S. Tsirkin } 135662be4e3aSMichael S. Tsirkin 135762be4e3aSMichael S. Tsirkin if (!(block->flags & RAM_RESIZEABLE)) { 135862be4e3aSMichael S. Tsirkin error_setg_errno(errp, EINVAL, 135962be4e3aSMichael S. Tsirkin "Length mismatch: %s: 0x" RAM_ADDR_FMT 136062be4e3aSMichael S. Tsirkin " in != 0x" RAM_ADDR_FMT, block->idstr, 136162be4e3aSMichael S. Tsirkin newsize, block->used_length); 136262be4e3aSMichael S. Tsirkin return -EINVAL; 136362be4e3aSMichael S. Tsirkin } 136462be4e3aSMichael S. Tsirkin 136562be4e3aSMichael S. Tsirkin if (block->max_length < newsize) { 136662be4e3aSMichael S. Tsirkin error_setg_errno(errp, EINVAL, 136762be4e3aSMichael S. Tsirkin "Length too large: %s: 0x" RAM_ADDR_FMT 136862be4e3aSMichael S. Tsirkin " > 0x" RAM_ADDR_FMT, block->idstr, 136962be4e3aSMichael S. Tsirkin newsize, block->max_length); 137062be4e3aSMichael S. Tsirkin return -EINVAL; 137162be4e3aSMichael S. Tsirkin } 137262be4e3aSMichael S. Tsirkin 137362be4e3aSMichael S. Tsirkin cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); 137462be4e3aSMichael S. Tsirkin block->used_length = newsize; 137562be4e3aSMichael S. Tsirkin cpu_physical_memory_set_dirty_range(block->offset, block->used_length); 137662be4e3aSMichael S. Tsirkin memory_region_set_size(block->mr, newsize); 137762be4e3aSMichael S. Tsirkin if (block->resized) { 137862be4e3aSMichael S. Tsirkin block->resized(block->idstr, newsize, block->host); 137962be4e3aSMichael S. Tsirkin } 138062be4e3aSMichael S. Tsirkin return 0; 138162be4e3aSMichael S. Tsirkin } 138262be4e3aSMichael S. Tsirkin 1383ef701d7bSHu Tao static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp) 1384c5705a77SAvi Kivity { 1385e1c57ab8SPaolo Bonzini RAMBlock *block; 13860d53d9feSMike Day RAMBlock *last_block = NULL; 13872152f5caSJuan Quintela ram_addr_t old_ram_size, new_ram_size; 13882152f5caSJuan Quintela 13892152f5caSJuan Quintela old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; 1390c5705a77SAvi Kivity 1391b2a8658eSUmesh Deshpande qemu_mutex_lock_ramlist(); 13929b8424d5SMichael S. Tsirkin new_block->offset = find_ram_offset(new_block->max_length); 1393e1c57ab8SPaolo Bonzini 13940628c182SMarkus Armbruster if (!new_block->host) { 1395e1c57ab8SPaolo Bonzini if (xen_enabled()) { 13969b8424d5SMichael S. Tsirkin xen_ram_alloc(new_block->offset, new_block->max_length, 13979b8424d5SMichael S. Tsirkin new_block->mr); 1398e1c57ab8SPaolo Bonzini } else { 13999b8424d5SMichael S. 
Tsirkin new_block->host = phys_mem_alloc(new_block->max_length, 1400a2b257d6SIgor Mammedov &new_block->mr->align); 140139228250SMarkus Armbruster if (!new_block->host) { 1402ef701d7bSHu Tao error_setg_errno(errp, errno, 1403ef701d7bSHu Tao "cannot set up guest memory '%s'", 1404ef701d7bSHu Tao memory_region_name(new_block->mr)); 1405ef701d7bSHu Tao qemu_mutex_unlock_ramlist(); 1406ef701d7bSHu Tao return -1; 140739228250SMarkus Armbruster } 14089b8424d5SMichael S. Tsirkin memory_try_enable_merging(new_block->host, new_block->max_length); 1409c902760fSMarcelo Tosatti } 14106977dfe6SYoshiaki Tamura } 141194a6b54fSpbrook 14120d53d9feSMike Day /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, 14130d53d9feSMike Day * QLIST (which has an RCU-friendly variant) does not have insertion at 14140d53d9feSMike Day * tail, so save the last element in last_block. 14150d53d9feSMike Day */ 14160dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 14170d53d9feSMike Day last_block = block; 14189b8424d5SMichael S. Tsirkin if (block->max_length < new_block->max_length) { 1419abb26d63SPaolo Bonzini break; 1420abb26d63SPaolo Bonzini } 1421abb26d63SPaolo Bonzini } 1422abb26d63SPaolo Bonzini if (block) { 14230dc3f44aSMike Day QLIST_INSERT_BEFORE_RCU(block, new_block, next); 14240d53d9feSMike Day } else if (last_block) { 14250dc3f44aSMike Day QLIST_INSERT_AFTER_RCU(last_block, new_block, next); 14260d53d9feSMike Day } else { /* list is empty */ 14270dc3f44aSMike Day QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); 1428abb26d63SPaolo Bonzini } 14290d6d3c87SPaolo Bonzini ram_list.mru_block = NULL; 143094a6b54fSpbrook 14310dc3f44aSMike Day /* Write list before version */ 14320dc3f44aSMike Day smp_wmb(); 1433f798b07fSUmesh Deshpande ram_list.version++; 1434b2a8658eSUmesh Deshpande qemu_mutex_unlock_ramlist(); 1435f798b07fSUmesh Deshpande 14362152f5caSJuan Quintela new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; 14372152f5caSJuan Quintela 14382152f5caSJuan Quintela if (new_ram_size > old_ram_size) { 14391ab4c8ceSJuan Quintela int i; 1440ae3a7047SMike Day 1441ae3a7047SMike Day /* ram_list.dirty_memory[] is protected by the iothread lock. */ 14421ab4c8ceSJuan Quintela for (i = 0; i < DIRTY_MEMORY_NUM; i++) { 14431ab4c8ceSJuan Quintela ram_list.dirty_memory[i] = 14441ab4c8ceSJuan Quintela bitmap_zero_extend(ram_list.dirty_memory[i], 14451ab4c8ceSJuan Quintela old_ram_size, new_ram_size); 14461ab4c8ceSJuan Quintela } 14472152f5caSJuan Quintela } 14489b8424d5SMichael S. Tsirkin cpu_physical_memory_set_dirty_range(new_block->offset, 14499b8424d5SMichael S. Tsirkin new_block->used_length); 145094a6b54fSpbrook 1451a904c911SPaolo Bonzini if (new_block->host) { 14529b8424d5SMichael S. Tsirkin qemu_ram_setup_dump(new_block->host, new_block->max_length); 14539b8424d5SMichael S. Tsirkin qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); 14549b8424d5SMichael S. Tsirkin qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK); 1455e1c57ab8SPaolo Bonzini if (kvm_enabled()) { 14569b8424d5SMichael S. 
Tsirkin kvm_setup_guest_memory(new_block->host, new_block->max_length); 1457e1c57ab8SPaolo Bonzini } 1458a904c911SPaolo Bonzini } 14596f0437e8SJan Kiszka 146094a6b54fSpbrook return new_block->offset; 146194a6b54fSpbrook } 1462e9a1ab19Sbellard 14630b183fc8SPaolo Bonzini #ifdef __linux__ 1464e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 1465dbcb8981SPaolo Bonzini bool share, const char *mem_path, 14667f56e740SPaolo Bonzini Error **errp) 1467e1c57ab8SPaolo Bonzini { 1468e1c57ab8SPaolo Bonzini RAMBlock *new_block; 1469ef701d7bSHu Tao ram_addr_t addr; 1470ef701d7bSHu Tao Error *local_err = NULL; 1471e1c57ab8SPaolo Bonzini 1472e1c57ab8SPaolo Bonzini if (xen_enabled()) { 14737f56e740SPaolo Bonzini error_setg(errp, "-mem-path not supported with Xen"); 14747f56e740SPaolo Bonzini return -1; 1475e1c57ab8SPaolo Bonzini } 1476e1c57ab8SPaolo Bonzini 1477e1c57ab8SPaolo Bonzini if (phys_mem_alloc != qemu_anon_ram_alloc) { 1478e1c57ab8SPaolo Bonzini /* 1479e1c57ab8SPaolo Bonzini * file_ram_alloc() needs to allocate just like 1480e1c57ab8SPaolo Bonzini * phys_mem_alloc, but we haven't bothered to provide 1481e1c57ab8SPaolo Bonzini * a hook there. 1482e1c57ab8SPaolo Bonzini */ 14837f56e740SPaolo Bonzini error_setg(errp, 14847f56e740SPaolo Bonzini "-mem-path not supported with this accelerator"); 14857f56e740SPaolo Bonzini return -1; 1486e1c57ab8SPaolo Bonzini } 1487e1c57ab8SPaolo Bonzini 1488e1c57ab8SPaolo Bonzini size = TARGET_PAGE_ALIGN(size); 1489e1c57ab8SPaolo Bonzini new_block = g_malloc0(sizeof(*new_block)); 1490e1c57ab8SPaolo Bonzini new_block->mr = mr; 14919b8424d5SMichael S. Tsirkin new_block->used_length = size; 14929b8424d5SMichael S. Tsirkin new_block->max_length = size; 1493dbcb8981SPaolo Bonzini new_block->flags = share ? RAM_SHARED : 0; 14947f56e740SPaolo Bonzini new_block->host = file_ram_alloc(new_block, size, 14957f56e740SPaolo Bonzini mem_path, errp); 14967f56e740SPaolo Bonzini if (!new_block->host) { 14977f56e740SPaolo Bonzini g_free(new_block); 14987f56e740SPaolo Bonzini return -1; 14997f56e740SPaolo Bonzini } 15007f56e740SPaolo Bonzini 1501ef701d7bSHu Tao addr = ram_block_add(new_block, &local_err); 1502ef701d7bSHu Tao if (local_err) { 1503ef701d7bSHu Tao g_free(new_block); 1504ef701d7bSHu Tao error_propagate(errp, local_err); 1505ef701d7bSHu Tao return -1; 1506ef701d7bSHu Tao } 1507ef701d7bSHu Tao return addr; 1508e1c57ab8SPaolo Bonzini } 15090b183fc8SPaolo Bonzini #endif 1510e1c57ab8SPaolo Bonzini 151162be4e3aSMichael S. Tsirkin static 151262be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 151362be4e3aSMichael S. Tsirkin void (*resized)(const char*, 151462be4e3aSMichael S. Tsirkin uint64_t length, 151562be4e3aSMichael S. Tsirkin void *host), 151662be4e3aSMichael S. Tsirkin void *host, bool resizeable, 1517ef701d7bSHu Tao MemoryRegion *mr, Error **errp) 1518e1c57ab8SPaolo Bonzini { 1519e1c57ab8SPaolo Bonzini RAMBlock *new_block; 1520ef701d7bSHu Tao ram_addr_t addr; 1521ef701d7bSHu Tao Error *local_err = NULL; 1522e1c57ab8SPaolo Bonzini 1523e1c57ab8SPaolo Bonzini size = TARGET_PAGE_ALIGN(size); 152462be4e3aSMichael S. Tsirkin max_size = TARGET_PAGE_ALIGN(max_size); 1525e1c57ab8SPaolo Bonzini new_block = g_malloc0(sizeof(*new_block)); 1526e1c57ab8SPaolo Bonzini new_block->mr = mr; 152762be4e3aSMichael S. Tsirkin new_block->resized = resized; 15289b8424d5SMichael S. Tsirkin new_block->used_length = size; 15299b8424d5SMichael S. Tsirkin new_block->max_length = max_size; 153062be4e3aSMichael S. 
Tsirkin assert(max_size >= size); 1531e1c57ab8SPaolo Bonzini new_block->fd = -1; 1532e1c57ab8SPaolo Bonzini new_block->host = host; 1533e1c57ab8SPaolo Bonzini if (host) { 15347bd4f430SPaolo Bonzini new_block->flags |= RAM_PREALLOC; 1535e1c57ab8SPaolo Bonzini } 153662be4e3aSMichael S. Tsirkin if (resizeable) { 153762be4e3aSMichael S. Tsirkin new_block->flags |= RAM_RESIZEABLE; 153862be4e3aSMichael S. Tsirkin } 1539ef701d7bSHu Tao addr = ram_block_add(new_block, &local_err); 1540ef701d7bSHu Tao if (local_err) { 1541ef701d7bSHu Tao g_free(new_block); 1542ef701d7bSHu Tao error_propagate(errp, local_err); 1543ef701d7bSHu Tao return -1; 1544ef701d7bSHu Tao } 1545ef701d7bSHu Tao return addr; 1546e1c57ab8SPaolo Bonzini } 1547e1c57ab8SPaolo Bonzini 154862be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 154962be4e3aSMichael S. Tsirkin MemoryRegion *mr, Error **errp) 155062be4e3aSMichael S. Tsirkin { 155162be4e3aSMichael S. Tsirkin return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp); 155262be4e3aSMichael S. Tsirkin } 155362be4e3aSMichael S. Tsirkin 1554ef701d7bSHu Tao ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp) 15556977dfe6SYoshiaki Tamura { 155662be4e3aSMichael S. Tsirkin return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp); 155762be4e3aSMichael S. Tsirkin } 155862be4e3aSMichael S. Tsirkin 155962be4e3aSMichael S. Tsirkin ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 156062be4e3aSMichael S. Tsirkin void (*resized)(const char*, 156162be4e3aSMichael S. Tsirkin uint64_t length, 156262be4e3aSMichael S. Tsirkin void *host), 156362be4e3aSMichael S. Tsirkin MemoryRegion *mr, Error **errp) 156462be4e3aSMichael S. Tsirkin { 156562be4e3aSMichael S. Tsirkin return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp); 15666977dfe6SYoshiaki Tamura } 15676977dfe6SYoshiaki Tamura 15681f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr) 15691f2e98b6SAlex Williamson { 15701f2e98b6SAlex Williamson RAMBlock *block; 15711f2e98b6SAlex Williamson 1572b2a8658eSUmesh Deshpande qemu_mutex_lock_ramlist(); 15730dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 15741f2e98b6SAlex Williamson if (addr == block->offset) { 15750dc3f44aSMike Day QLIST_REMOVE_RCU(block, next); 15760d6d3c87SPaolo Bonzini ram_list.mru_block = NULL; 15770dc3f44aSMike Day /* Write list before version */ 15780dc3f44aSMike Day smp_wmb(); 1579f798b07fSUmesh Deshpande ram_list.version++; 158043771539SPaolo Bonzini g_free_rcu(block, rcu); 1581b2a8658eSUmesh Deshpande break; 15821f2e98b6SAlex Williamson } 15831f2e98b6SAlex Williamson } 1584b2a8658eSUmesh Deshpande qemu_mutex_unlock_ramlist(); 15851f2e98b6SAlex Williamson } 15861f2e98b6SAlex Williamson 158743771539SPaolo Bonzini static void reclaim_ramblock(RAMBlock *block) 1588e9a1ab19Sbellard { 15897bd4f430SPaolo Bonzini if (block->flags & RAM_PREALLOC) { 1590cd19cfa2SHuang Ying ; 1591dfeaf2abSMarkus Armbruster } else if (xen_enabled()) { 1592dfeaf2abSMarkus Armbruster xen_invalidate_map_cache_entry(block->host); 1593089f3f76SStefan Weil #ifndef _WIN32 15943435f395SMarkus Armbruster } else if (block->fd >= 0) { 15959b8424d5SMichael S. Tsirkin munmap(block->host, block->max_length); 159604b16653SAlex Williamson close(block->fd); 1597089f3f76SStefan Weil #endif 159804b16653SAlex Williamson } else { 15999b8424d5SMichael S. 
Tsirkin qemu_anon_ram_free(block->host, block->max_length); 160004b16653SAlex Williamson } 16017267c094SAnthony Liguori g_free(block); 160243771539SPaolo Bonzini } 160343771539SPaolo Bonzini 160443771539SPaolo Bonzini void qemu_ram_free(ram_addr_t addr) 160543771539SPaolo Bonzini { 160643771539SPaolo Bonzini RAMBlock *block; 160743771539SPaolo Bonzini 160843771539SPaolo Bonzini qemu_mutex_lock_ramlist(); 16090dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 161043771539SPaolo Bonzini if (addr == block->offset) { 16110dc3f44aSMike Day QLIST_REMOVE_RCU(block, next); 161243771539SPaolo Bonzini ram_list.mru_block = NULL; 16130dc3f44aSMike Day /* Write list before version */ 16140dc3f44aSMike Day smp_wmb(); 161543771539SPaolo Bonzini ram_list.version++; 161643771539SPaolo Bonzini call_rcu(block, reclaim_ramblock, rcu); 1617b2a8658eSUmesh Deshpande break; 161804b16653SAlex Williamson } 161904b16653SAlex Williamson } 1620b2a8658eSUmesh Deshpande qemu_mutex_unlock_ramlist(); 1621e9a1ab19Sbellard } 1622e9a1ab19Sbellard 1623cd19cfa2SHuang Ying #ifndef _WIN32 1624cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 1625cd19cfa2SHuang Ying { 1626cd19cfa2SHuang Ying RAMBlock *block; 1627cd19cfa2SHuang Ying ram_addr_t offset; 1628cd19cfa2SHuang Ying int flags; 1629cd19cfa2SHuang Ying void *area, *vaddr; 1630cd19cfa2SHuang Ying 16310dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1632cd19cfa2SHuang Ying offset = addr - block->offset; 16339b8424d5SMichael S. Tsirkin if (offset < block->max_length) { 16341240be24SMichael S. Tsirkin vaddr = ramblock_ptr(block, offset); 16357bd4f430SPaolo Bonzini if (block->flags & RAM_PREALLOC) { 1636cd19cfa2SHuang Ying ; 1637dfeaf2abSMarkus Armbruster } else if (xen_enabled()) { 1638dfeaf2abSMarkus Armbruster abort(); 1639cd19cfa2SHuang Ying } else { 1640cd19cfa2SHuang Ying flags = MAP_FIXED; 1641cd19cfa2SHuang Ying munmap(vaddr, length); 16423435f395SMarkus Armbruster if (block->fd >= 0) { 1643dbcb8981SPaolo Bonzini flags |= (block->flags & RAM_SHARED ? 1644dbcb8981SPaolo Bonzini MAP_SHARED : MAP_PRIVATE); 1645cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 1646cd19cfa2SHuang Ying flags, block->fd, offset); 1647cd19cfa2SHuang Ying } else { 16482eb9fbaaSMarkus Armbruster /* 16492eb9fbaaSMarkus Armbruster * Remap needs to match alloc. Accelerators that 16502eb9fbaaSMarkus Armbruster * set phys_mem_alloc never remap. If they did, 16512eb9fbaaSMarkus Armbruster * we'd need a remap hook here. 
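 * (The assert just below enforces exactly that: this remap path is only
 * reachable when qemu_anon_ram_alloc did the original allocation.)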
16522eb9fbaaSMarkus Armbruster */ 16532eb9fbaaSMarkus Armbruster assert(phys_mem_alloc == qemu_anon_ram_alloc); 16542eb9fbaaSMarkus Armbruster 1655cd19cfa2SHuang Ying flags |= MAP_PRIVATE | MAP_ANONYMOUS; 1656cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 1657cd19cfa2SHuang Ying flags, -1, 0); 1658cd19cfa2SHuang Ying } 1659cd19cfa2SHuang Ying if (area != vaddr) { 1660f15fbc4bSAnthony PERARD fprintf(stderr, "Could not remap addr: " 1661f15fbc4bSAnthony PERARD RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", 1662cd19cfa2SHuang Ying length, addr); 1663cd19cfa2SHuang Ying exit(1); 1664cd19cfa2SHuang Ying } 16658490fc78SLuiz Capitulino memory_try_enable_merging(vaddr, length); 1666ddb97f1dSJason Baron qemu_ram_setup_dump(vaddr, length); 1667cd19cfa2SHuang Ying } 1668cd19cfa2SHuang Ying } 1669cd19cfa2SHuang Ying } 1670cd19cfa2SHuang Ying } 1671cd19cfa2SHuang Ying #endif /* !_WIN32 */ 1672cd19cfa2SHuang Ying 1673a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr) 1674a35ba7beSPaolo Bonzini { 1675ae3a7047SMike Day RAMBlock *block; 1676ae3a7047SMike Day int fd; 1677a35ba7beSPaolo Bonzini 16780dc3f44aSMike Day rcu_read_lock(); 1679ae3a7047SMike Day block = qemu_get_ram_block(addr); 1680ae3a7047SMike Day fd = block->fd; 16810dc3f44aSMike Day rcu_read_unlock(); 1682ae3a7047SMike Day return fd; 1683a35ba7beSPaolo Bonzini } 1684a35ba7beSPaolo Bonzini 16853fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr) 16863fd74b84SDamjan Marion { 1687ae3a7047SMike Day RAMBlock *block; 1688ae3a7047SMike Day void *ptr; 16893fd74b84SDamjan Marion 16900dc3f44aSMike Day rcu_read_lock(); 1691ae3a7047SMike Day block = qemu_get_ram_block(addr); 1692ae3a7047SMike Day ptr = ramblock_ptr(block, 0); 16930dc3f44aSMike Day rcu_read_unlock(); 1694ae3a7047SMike Day return ptr; 16953fd74b84SDamjan Marion } 16963fd74b84SDamjan Marion 16971b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc. 1698ae3a7047SMike Day * This should not be used for general purpose DMA. Use address_space_map 1699ae3a7047SMike Day * or address_space_rw instead. For local memory (e.g. video ram) that the 1700ae3a7047SMike Day * device owns, use memory_region_get_ram_ptr. 17010dc3f44aSMike Day * 17020dc3f44aSMike Day * By the time this function returns, the returned pointer is not protected 17030dc3f44aSMike Day * by RCU anymore. If the caller is not within an RCU critical section and 17040dc3f44aSMike Day * does not hold the iothread lock, it must have other means of protecting the 17050dc3f44aSMike Day * pointer, such as a reference to the region that includes the incoming 17060dc3f44aSMike Day * ram_addr_t. 17071b5ec234SPaolo Bonzini */ 17081b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr) 17091b5ec234SPaolo Bonzini { 1710ae3a7047SMike Day RAMBlock *block; 1711ae3a7047SMike Day void *ptr; 17121b5ec234SPaolo Bonzini 17130dc3f44aSMike Day rcu_read_lock(); 1714ae3a7047SMike Day block = qemu_get_ram_block(addr); 1715ae3a7047SMike Day 1716ae3a7047SMike Day if (xen_enabled() && block->host == NULL) { 1717432d268cSJun Nakajima /* We need to check if the requested address is in the RAM 1718432d268cSJun Nakajima * because we don't want to map the entire memory in QEMU. 1719712c2b41SStefano Stabellini * In that case just map until the end of the page. 
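 * Only the block starting at offset 0 gets that page-at-a-time
 * treatment; every other block is mapped in its entirety below.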
1720432d268cSJun Nakajima */ 1721432d268cSJun Nakajima if (block->offset == 0) { 1722ae3a7047SMike Day ptr = xen_map_cache(addr, 0, 0); 17230dc3f44aSMike Day goto unlock; 1724432d268cSJun Nakajima } 1725ae3a7047SMike Day 1726ae3a7047SMike Day block->host = xen_map_cache(block->offset, block->max_length, 1); 1727432d268cSJun Nakajima } 1728ae3a7047SMike Day ptr = ramblock_ptr(block, addr - block->offset); 1729ae3a7047SMike Day 17300dc3f44aSMike Day unlock: 17310dc3f44aSMike Day rcu_read_unlock(); 1732ae3a7047SMike Day return ptr; 173394a6b54fSpbrook } 1734f471a17eSAlex Williamson 173538bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr 1736ae3a7047SMike Day * but takes a size argument. 17370dc3f44aSMike Day * 17380dc3f44aSMike Day * By the time this function returns, the returned pointer is not protected 17390dc3f44aSMike Day * by RCU anymore. If the caller is not within an RCU critical section and 17400dc3f44aSMike Day * does not hold the iothread lock, it must have other means of protecting the 17410dc3f44aSMike Day * pointer, such as a reference to the region that includes the incoming 17420dc3f44aSMike Day * ram_addr_t. 1743ae3a7047SMike Day */ 1744cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size) 174538bee5dcSStefano Stabellini { 1746ae3a7047SMike Day void *ptr; 17478ab934f9SStefano Stabellini if (*size == 0) { 17488ab934f9SStefano Stabellini return NULL; 17498ab934f9SStefano Stabellini } 1750868bb33fSJan Kiszka if (xen_enabled()) { 1751e41d7c69SJan Kiszka return xen_map_cache(addr, *size, 1); 1752868bb33fSJan Kiszka } else { 175338bee5dcSStefano Stabellini RAMBlock *block; 17540dc3f44aSMike Day rcu_read_lock(); 17550dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 17569b8424d5SMichael S. Tsirkin if (addr - block->offset < block->max_length) { 17579b8424d5SMichael S. Tsirkin if (addr - block->offset + *size > block->max_length) 17589b8424d5SMichael S. Tsirkin *size = block->max_length - addr + block->offset; 1759ae3a7047SMike Day ptr = ramblock_ptr(block, addr - block->offset); 17600dc3f44aSMike Day rcu_read_unlock(); 1761ae3a7047SMike Day return ptr; 176238bee5dcSStefano Stabellini } 176338bee5dcSStefano Stabellini } 176438bee5dcSStefano Stabellini 176538bee5dcSStefano Stabellini fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 176638bee5dcSStefano Stabellini abort(); 176738bee5dcSStefano Stabellini } 176838bee5dcSStefano Stabellini } 176938bee5dcSStefano Stabellini 17707443b437SPaolo Bonzini /* Some of the softmmu routines need to translate from a host pointer 1771ae3a7047SMike Day * (typically a TLB entry) back to a ram offset. 1772ae3a7047SMike Day * 1773ae3a7047SMike Day * By the time this function returns, the returned pointer is not protected 1774ae3a7047SMike Day * by RCU anymore. If the caller is not within an RCU critical section and 1775ae3a7047SMike Day * does not hold the iothread lock, it must have other means of protecting the 1776ae3a7047SMike Day * pointer, such as a reference to the region that includes the incoming 1777ae3a7047SMike Day * ram_addr_t. 
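 * (Taking a reference on the returned MemoryRegion with
 * memory_region_ref() is one such means.)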
1778ae3a7047SMike Day  */
17791b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
17805579c7f3Spbrook {
178194a6b54fSpbrook     RAMBlock *block;
178294a6b54fSpbrook     uint8_t *host = ptr;
1783ae3a7047SMike Day     MemoryRegion *mr;
178494a6b54fSpbrook 
1785868bb33fSJan Kiszka     if (xen_enabled()) {
17860dc3f44aSMike Day         rcu_read_lock();
1787e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
1788ae3a7047SMike Day         mr = qemu_get_ram_block(*ram_addr)->mr;
17890dc3f44aSMike Day         rcu_read_unlock();
1790ae3a7047SMike Day         return mr;
1791712c2b41SStefano Stabellini     }
1792712c2b41SStefano Stabellini 
17930dc3f44aSMike Day     rcu_read_lock();
17940dc3f44aSMike Day     block = atomic_rcu_read(&ram_list.mru_block);
17959b8424d5SMichael S. Tsirkin     if (block && block->host && host - block->host < block->max_length) {
179623887b79SPaolo Bonzini         goto found;
179723887b79SPaolo Bonzini     }
179823887b79SPaolo Bonzini 
17990dc3f44aSMike Day     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1800432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
1801432d268cSJun Nakajima         if (block->host == NULL) {
1802432d268cSJun Nakajima             continue;
1803432d268cSJun Nakajima         }
18049b8424d5SMichael S. Tsirkin         if (host - block->host < block->max_length) {
180523887b79SPaolo Bonzini             goto found;
180694a6b54fSpbrook         }
1807f471a17eSAlex Williamson     }
1808432d268cSJun Nakajima 
18090dc3f44aSMike Day     rcu_read_unlock();
18101b5ec234SPaolo Bonzini     return NULL;
181123887b79SPaolo Bonzini 
181223887b79SPaolo Bonzini found:
181323887b79SPaolo Bonzini     *ram_addr = block->offset + (host - block->host);
1814ae3a7047SMike Day     mr = block->mr;
18150dc3f44aSMike Day     rcu_read_unlock();
1816ae3a7047SMike Day     return mr;
1817e890261fSMarcelo Tosatti }
1818f471a17eSAlex Williamson 
1819a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
18200e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
18211ccde1cbSbellard {
182252159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
18230e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
18243a7d929eSbellard     }
18250e0df1e2SAvi Kivity     switch (size) {
18260e0df1e2SAvi Kivity     case 1:
18275579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
18280e0df1e2SAvi Kivity         break;
18290e0df1e2SAvi Kivity     case 2:
18305579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
18310e0df1e2SAvi Kivity         break;
18320e0df1e2SAvi Kivity     case 4:
18335579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
18340e0df1e2SAvi Kivity         break;
18350e0df1e2SAvi Kivity     default:
18360e0df1e2SAvi Kivity         abort();
18370e0df1e2SAvi Kivity     }
18386886867eSPaolo Bonzini     cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1839f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1840f23db169Sbellard        flushed */
1841a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
18424917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
184393afeadeSAndreas Färber         tlb_set_dirty(env, current_cpu->mem_io_vaddr);
18444917cf44SAndreas Färber     }
18451ccde1cbSbellard }
18461ccde1cbSbellard 
1847b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1848b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1849b018ddf6SPaolo Bonzini {
1850b018ddf6SPaolo Bonzini     return is_write;
1851b018ddf6SPaolo Bonzini }
1852b018ddf6SPaolo Bonzini 
18530e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
18540e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1855b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
18560e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
18571ccde1cbSbellard };
18581ccde1cbSbellard 
18590f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit. */
186005068c0dSPeter Maydell static void check_watchpoint(int offset, int len, int flags)
18610f459d16Spbrook {
186293afeadeSAndreas Färber     CPUState *cpu = current_cpu;
186393afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
186406d55cc1Saliguori     target_ulong pc, cs_base;
18650f459d16Spbrook     target_ulong vaddr;
1866a1d1bb31Saliguori     CPUWatchpoint *wp;
186706d55cc1Saliguori     int cpu_flags;
18680f459d16Spbrook 
1869ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
187006d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
187106d55cc1Saliguori          * the debug interrupt so that it will trigger after the
187206d55cc1Saliguori          * current instruction. */
187393afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
187406d55cc1Saliguori         return;
187506d55cc1Saliguori     }
187693afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1877ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
187805068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
187905068c0dSPeter Maydell             && (wp->flags & flags)) {
188008225676SPeter Maydell             if (flags == BP_MEM_READ) {
188108225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
188208225676SPeter Maydell             } else {
188308225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
188408225676SPeter Maydell             }
188508225676SPeter Maydell             wp->hitaddr = vaddr;
1886ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
1887ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
1888239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
188906d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
189027103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
18915638d180SAndreas Färber                     cpu_loop_exit(cpu);
189206d55cc1Saliguori                 } else {
189306d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1894648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
18950ea8cb88SAndreas Färber                     cpu_resume_from_signal(cpu, NULL);
18960f459d16Spbrook                 }
1897488d6577SMax Filippov             }
18986e140f28Saliguori         } else {
18996e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
19006e140f28Saliguori         }
19010f459d16Spbrook     }
19020f459d16Spbrook }
19030f459d16Spbrook 
19046658ffb8Spbrook /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
19056658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
19066658ffb8Spbrook    phys routines. */
1907a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
19081ec9b909SAvi Kivity                                unsigned size)
19096658ffb8Spbrook {
191005068c0dSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
19111ec9b909SAvi Kivity     switch (size) {
19122c17449bSEdgar E. Iglesias     case 1: return ldub_phys(&address_space_memory, addr);
191341701aa4SEdgar E. Iglesias     case 2: return lduw_phys(&address_space_memory, addr);
1914fdfba1a2SEdgar E.
Iglesias case 4: return ldl_phys(&address_space_memory, addr); 19151ec9b909SAvi Kivity default: abort(); 19161ec9b909SAvi Kivity } 19176658ffb8Spbrook } 19186658ffb8Spbrook 1919a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr, 19201ec9b909SAvi Kivity uint64_t val, unsigned size) 19216658ffb8Spbrook { 192205068c0dSPeter Maydell check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE); 19231ec9b909SAvi Kivity switch (size) { 192467364150SMax Filippov case 1: 1925db3be60dSEdgar E. Iglesias stb_phys(&address_space_memory, addr, val); 192667364150SMax Filippov break; 192767364150SMax Filippov case 2: 19285ce5944dSEdgar E. Iglesias stw_phys(&address_space_memory, addr, val); 192967364150SMax Filippov break; 193067364150SMax Filippov case 4: 1931ab1da857SEdgar E. Iglesias stl_phys(&address_space_memory, addr, val); 193267364150SMax Filippov break; 19331ec9b909SAvi Kivity default: abort(); 19341ec9b909SAvi Kivity } 19356658ffb8Spbrook } 19366658ffb8Spbrook 19371ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = { 19381ec9b909SAvi Kivity .read = watch_mem_read, 19391ec9b909SAvi Kivity .write = watch_mem_write, 19401ec9b909SAvi Kivity .endianness = DEVICE_NATIVE_ENDIAN, 19416658ffb8Spbrook }; 19426658ffb8Spbrook 1943a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr, 194470c68e44SAvi Kivity unsigned len) 1945db7b5426Sblueswir1 { 1946acc9d80bSJan Kiszka subpage_t *subpage = opaque; 1947ff6cff75SPaolo Bonzini uint8_t buf[8]; 1948791af8c8SPaolo Bonzini 1949db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 1950016e9d62SAmos Kong printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, 1951acc9d80bSJan Kiszka subpage, len, addr); 1952db7b5426Sblueswir1 #endif 1953acc9d80bSJan Kiszka address_space_read(subpage->as, addr + subpage->base, buf, len); 1954acc9d80bSJan Kiszka switch (len) { 1955acc9d80bSJan Kiszka case 1: 1956acc9d80bSJan Kiszka return ldub_p(buf); 1957acc9d80bSJan Kiszka case 2: 1958acc9d80bSJan Kiszka return lduw_p(buf); 1959acc9d80bSJan Kiszka case 4: 1960acc9d80bSJan Kiszka return ldl_p(buf); 1961ff6cff75SPaolo Bonzini case 8: 1962ff6cff75SPaolo Bonzini return ldq_p(buf); 1963acc9d80bSJan Kiszka default: 1964acc9d80bSJan Kiszka abort(); 1965acc9d80bSJan Kiszka } 1966db7b5426Sblueswir1 } 1967db7b5426Sblueswir1 1968a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr, 196970c68e44SAvi Kivity uint64_t value, unsigned len) 1970db7b5426Sblueswir1 { 1971acc9d80bSJan Kiszka subpage_t *subpage = opaque; 1972ff6cff75SPaolo Bonzini uint8_t buf[8]; 1973acc9d80bSJan Kiszka 1974db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 1975016e9d62SAmos Kong printf("%s: subpage %p len %u addr " TARGET_FMT_plx 1976acc9d80bSJan Kiszka " value %"PRIx64"\n", 1977acc9d80bSJan Kiszka __func__, subpage, len, addr, value); 1978db7b5426Sblueswir1 #endif 1979acc9d80bSJan Kiszka switch (len) { 1980acc9d80bSJan Kiszka case 1: 1981acc9d80bSJan Kiszka stb_p(buf, value); 1982acc9d80bSJan Kiszka break; 1983acc9d80bSJan Kiszka case 2: 1984acc9d80bSJan Kiszka stw_p(buf, value); 1985acc9d80bSJan Kiszka break; 1986acc9d80bSJan Kiszka case 4: 1987acc9d80bSJan Kiszka stl_p(buf, value); 1988acc9d80bSJan Kiszka break; 1989ff6cff75SPaolo Bonzini case 8: 1990ff6cff75SPaolo Bonzini stq_p(buf, value); 1991ff6cff75SPaolo Bonzini break; 1992acc9d80bSJan Kiszka default: 1993acc9d80bSJan Kiszka abort(); 1994acc9d80bSJan Kiszka } 1995acc9d80bSJan Kiszka address_space_write(subpage->as, addr + subpage->base, buf, len); 1996db7b5426Sblueswir1 } 
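/* Example of the forwarding above: a 2-byte write to offset 0x806 of a
 * subpage based at 0x10000 is serialized into buf by stw_p() and re-issued
 * as address_space_write(subpage->as, 0x10806, buf, 2), so the access is
 * resolved again, at byte granularity, in the backing address space.
 * Subpages only come into existence when a region is not page-aligned;
 * an illustrative sketch (not built, names made up) follows:
 */
#if 0
static void example_map_unaligned_mmio(MemoryRegion *sysmem,
                                       MemoryRegion *dev_mr)
{
    /* 0x10800 is not TARGET_PAGE_SIZE aligned, so the memory core carves
     * the surrounding pages into subpages dispatched by subpage_ops. */
    memory_region_add_subregion(sysmem, 0x10800, dev_mr);
}
#endif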
1997db7b5426Sblueswir1 1998c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr, 1999016e9d62SAmos Kong unsigned len, bool is_write) 2000c353e4ccSPaolo Bonzini { 2001acc9d80bSJan Kiszka subpage_t *subpage = opaque; 2002c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE) 2003016e9d62SAmos Kong printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", 2004acc9d80bSJan Kiszka __func__, subpage, is_write ? 'w' : 'r', len, addr); 2005c353e4ccSPaolo Bonzini #endif 2006c353e4ccSPaolo Bonzini 2007acc9d80bSJan Kiszka return address_space_access_valid(subpage->as, addr + subpage->base, 2008016e9d62SAmos Kong len, is_write); 2009c353e4ccSPaolo Bonzini } 2010c353e4ccSPaolo Bonzini 201170c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = { 201270c68e44SAvi Kivity .read = subpage_read, 201370c68e44SAvi Kivity .write = subpage_write, 2014ff6cff75SPaolo Bonzini .impl.min_access_size = 1, 2015ff6cff75SPaolo Bonzini .impl.max_access_size = 8, 2016ff6cff75SPaolo Bonzini .valid.min_access_size = 1, 2017ff6cff75SPaolo Bonzini .valid.max_access_size = 8, 2018c353e4ccSPaolo Bonzini .valid.accepts = subpage_accepts, 201970c68e44SAvi Kivity .endianness = DEVICE_NATIVE_ENDIAN, 2020db7b5426Sblueswir1 }; 2021db7b5426Sblueswir1 2022c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 20235312bd8bSAvi Kivity uint16_t section) 2024db7b5426Sblueswir1 { 2025db7b5426Sblueswir1 int idx, eidx; 2026db7b5426Sblueswir1 2027db7b5426Sblueswir1 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2028db7b5426Sblueswir1 return -1; 2029db7b5426Sblueswir1 idx = SUBPAGE_IDX(start); 2030db7b5426Sblueswir1 eidx = SUBPAGE_IDX(end); 2031db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 2032016e9d62SAmos Kong printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2033016e9d62SAmos Kong __func__, mmio, start, end, idx, eidx, section); 2034db7b5426Sblueswir1 #endif 2035db7b5426Sblueswir1 for (; idx <= eidx; idx++) { 20365312bd8bSAvi Kivity mmio->sub_section[idx] = section; 2037db7b5426Sblueswir1 } 2038db7b5426Sblueswir1 2039db7b5426Sblueswir1 return 0; 2040db7b5426Sblueswir1 } 2041db7b5426Sblueswir1 2042acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base) 2043db7b5426Sblueswir1 { 2044c227f099SAnthony Liguori subpage_t *mmio; 2045db7b5426Sblueswir1 20467267c094SAnthony Liguori mmio = g_malloc0(sizeof(subpage_t)); 20471eec614bSaliguori 2048acc9d80bSJan Kiszka mmio->as = as; 2049db7b5426Sblueswir1 mmio->base = base; 20502c9b15caSPaolo Bonzini memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2051b4fefef9SPeter Crosthwaite NULL, TARGET_PAGE_SIZE); 2052b3b00c78SAvi Kivity mmio->iomem.subpage = true; 2053db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 2054016e9d62SAmos Kong printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, 2055016e9d62SAmos Kong mmio, base, TARGET_PAGE_SIZE); 2056db7b5426Sblueswir1 #endif 2057b41aac4fSLiu Ping Fan subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); 2058db7b5426Sblueswir1 2059db7b5426Sblueswir1 return mmio; 2060db7b5426Sblueswir1 } 2061db7b5426Sblueswir1 2062a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as, 2063a656e22fSPeter Crosthwaite MemoryRegion *mr) 20645312bd8bSAvi Kivity { 2065a656e22fSPeter Crosthwaite assert(as); 20665312bd8bSAvi Kivity MemoryRegionSection section = { 2067a656e22fSPeter Crosthwaite .address_space = as, 20685312bd8bSAvi Kivity .mr = mr, 20695312bd8bSAvi Kivity 
.offset_within_address_space = 0, 20705312bd8bSAvi Kivity .offset_within_region = 0, 2071052e87b0SPaolo Bonzini .size = int128_2_64(), 20725312bd8bSAvi Kivity }; 20735312bd8bSAvi Kivity 207453cb28cbSMarcel Apfelbaum return phys_section_add(map, §ion); 20755312bd8bSAvi Kivity } 20765312bd8bSAvi Kivity 20779d82b5a7SPaolo Bonzini MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index) 2078aa102231SAvi Kivity { 207979e2b9aeSPaolo Bonzini AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch); 208079e2b9aeSPaolo Bonzini MemoryRegionSection *sections = d->map.sections; 20819d82b5a7SPaolo Bonzini 20829d82b5a7SPaolo Bonzini return sections[index & ~TARGET_PAGE_MASK].mr; 2083aa102231SAvi Kivity } 2084aa102231SAvi Kivity 2085e9179ce1SAvi Kivity static void io_mem_init(void) 2086e9179ce1SAvi Kivity { 20871f6245e5SPaolo Bonzini memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); 20882c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 20891f6245e5SPaolo Bonzini NULL, UINT64_MAX); 20902c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, 20911f6245e5SPaolo Bonzini NULL, UINT64_MAX); 20922c9b15caSPaolo Bonzini memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, 20931f6245e5SPaolo Bonzini NULL, UINT64_MAX); 2094e9179ce1SAvi Kivity } 2095e9179ce1SAvi Kivity 2096ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener) 2097ac1970fbSAvi Kivity { 209889ae337aSPaolo Bonzini AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); 209953cb28cbSMarcel Apfelbaum AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 210053cb28cbSMarcel Apfelbaum uint16_t n; 210153cb28cbSMarcel Apfelbaum 2102a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_unassigned); 210353cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_UNASSIGNED); 2104a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_notdirty); 210553cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_NOTDIRTY); 2106a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_rom); 210753cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_ROM); 2108a656e22fSPeter Crosthwaite n = dummy_section(&d->map, as, &io_mem_watch); 210953cb28cbSMarcel Apfelbaum assert(n == PHYS_SECTION_WATCH); 211000752703SPaolo Bonzini 21119736e55bSMichael S. Tsirkin d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 211200752703SPaolo Bonzini d->as = as; 211300752703SPaolo Bonzini as->next_dispatch = d; 211400752703SPaolo Bonzini } 211500752703SPaolo Bonzini 211679e2b9aeSPaolo Bonzini static void address_space_dispatch_free(AddressSpaceDispatch *d) 211779e2b9aeSPaolo Bonzini { 211879e2b9aeSPaolo Bonzini phys_sections_free(&d->map); 211979e2b9aeSPaolo Bonzini g_free(d); 212079e2b9aeSPaolo Bonzini } 212179e2b9aeSPaolo Bonzini 212200752703SPaolo Bonzini static void mem_commit(MemoryListener *listener) 212300752703SPaolo Bonzini { 212400752703SPaolo Bonzini AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); 21250475d94fSPaolo Bonzini AddressSpaceDispatch *cur = as->dispatch; 21260475d94fSPaolo Bonzini AddressSpaceDispatch *next = as->next_dispatch; 2127ac1970fbSAvi Kivity 212853cb28cbSMarcel Apfelbaum phys_page_compact_all(next, next->map.nodes_nb); 2129b35ba30fSMichael S. 
Tsirkin 213079e2b9aeSPaolo Bonzini atomic_rcu_set(&as->dispatch, next); 213153cb28cbSMarcel Apfelbaum if (cur) { 213279e2b9aeSPaolo Bonzini call_rcu(cur, address_space_dispatch_free, rcu); 2133ac1970fbSAvi Kivity } 21349affd6fcSPaolo Bonzini } 21359affd6fcSPaolo Bonzini 21361d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener) 213750c1e149SAvi Kivity { 2138182735efSAndreas Färber CPUState *cpu; 2139117712c3SAvi Kivity 2140117712c3SAvi Kivity /* since each CPU stores ram addresses in its TLB cache, we must 2141117712c3SAvi Kivity reset the modified entries */ 2142117712c3SAvi Kivity /* XXX: slow ! */ 2143bdc44640SAndreas Färber CPU_FOREACH(cpu) { 214433bde2e1SEdgar E. Iglesias /* FIXME: Disentangle the cpu.h circular files deps so we can 214533bde2e1SEdgar E. Iglesias directly get the right CPU from listener. */ 214633bde2e1SEdgar E. Iglesias if (cpu->tcg_as_listener != listener) { 214733bde2e1SEdgar E. Iglesias continue; 214833bde2e1SEdgar E. Iglesias } 214976e5c76fSPaolo Bonzini cpu_reload_memory_map(cpu); 2150117712c3SAvi Kivity } 215150c1e149SAvi Kivity } 215250c1e149SAvi Kivity 215393632747SAvi Kivity static void core_log_global_start(MemoryListener *listener) 215493632747SAvi Kivity { 2155981fdf23SJuan Quintela cpu_physical_memory_set_dirty_tracking(true); 215693632747SAvi Kivity } 215793632747SAvi Kivity 215893632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener) 215993632747SAvi Kivity { 2160981fdf23SJuan Quintela cpu_physical_memory_set_dirty_tracking(false); 216193632747SAvi Kivity } 216293632747SAvi Kivity 216393632747SAvi Kivity static MemoryListener core_memory_listener = { 216493632747SAvi Kivity .log_global_start = core_log_global_start, 216593632747SAvi Kivity .log_global_stop = core_log_global_stop, 2166ac1970fbSAvi Kivity .priority = 1, 216793632747SAvi Kivity }; 216893632747SAvi Kivity 2169ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as) 2170ac1970fbSAvi Kivity { 217100752703SPaolo Bonzini as->dispatch = NULL; 217289ae337aSPaolo Bonzini as->dispatch_listener = (MemoryListener) { 2173ac1970fbSAvi Kivity .begin = mem_begin, 217400752703SPaolo Bonzini .commit = mem_commit, 2175ac1970fbSAvi Kivity .region_add = mem_add, 2176ac1970fbSAvi Kivity .region_nop = mem_add, 2177ac1970fbSAvi Kivity .priority = 0, 2178ac1970fbSAvi Kivity }; 217989ae337aSPaolo Bonzini memory_listener_register(&as->dispatch_listener, as); 2180ac1970fbSAvi Kivity } 2181ac1970fbSAvi Kivity 21826e48e8f9SPaolo Bonzini void address_space_unregister(AddressSpace *as) 21836e48e8f9SPaolo Bonzini { 21846e48e8f9SPaolo Bonzini memory_listener_unregister(&as->dispatch_listener); 21856e48e8f9SPaolo Bonzini } 21866e48e8f9SPaolo Bonzini 218783f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as) 218883f3c251SAvi Kivity { 218983f3c251SAvi Kivity AddressSpaceDispatch *d = as->dispatch; 219083f3c251SAvi Kivity 219179e2b9aeSPaolo Bonzini atomic_rcu_set(&as->dispatch, NULL); 219279e2b9aeSPaolo Bonzini if (d) { 219379e2b9aeSPaolo Bonzini call_rcu(d, address_space_dispatch_free, rcu); 219479e2b9aeSPaolo Bonzini } 219583f3c251SAvi Kivity } 219683f3c251SAvi Kivity 219762152b8aSAvi Kivity static void memory_map_init(void) 219862152b8aSAvi Kivity { 21997267c094SAnthony Liguori system_memory = g_malloc(sizeof(*system_memory)); 220003f49957SPaolo Bonzini 220157271d63SPaolo Bonzini memory_region_init(system_memory, NULL, "system", UINT64_MAX); 22027dca8043SAlexey Kardashevskiy address_space_init(&address_space_memory, system_memory, "memory"); 
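    /* From this point on, address_space_memory is the root that CPU and
     * DMA accesses resolve against; the I/O space created below is a
     * separate root of its own, only 64KiB in size. */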
2203309cb471SAvi Kivity 22047267c094SAnthony Liguori system_io = g_malloc(sizeof(*system_io)); 22053bb28b72SJan Kiszka memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 22063bb28b72SJan Kiszka 65536); 22077dca8043SAlexey Kardashevskiy address_space_init(&address_space_io, system_io, "I/O"); 220893632747SAvi Kivity 2209f6790af6SAvi Kivity memory_listener_register(&core_memory_listener, &address_space_memory); 22102641689aSliguang } 221162152b8aSAvi Kivity 221262152b8aSAvi Kivity MemoryRegion *get_system_memory(void) 221362152b8aSAvi Kivity { 221462152b8aSAvi Kivity return system_memory; 221562152b8aSAvi Kivity } 221662152b8aSAvi Kivity 2217309cb471SAvi Kivity MemoryRegion *get_system_io(void) 2218309cb471SAvi Kivity { 2219309cb471SAvi Kivity return system_io; 2220309cb471SAvi Kivity } 2221309cb471SAvi Kivity 2222e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */ 2223e2eef170Spbrook 222413eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */ 222513eb76e0Sbellard #if defined(CONFIG_USER_ONLY) 2226f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, 2227a68fe89cSPaul Brook uint8_t *buf, int len, int is_write) 222813eb76e0Sbellard { 222913eb76e0Sbellard int l, flags; 223013eb76e0Sbellard target_ulong page; 223153a5960aSpbrook void * p; 223213eb76e0Sbellard 223313eb76e0Sbellard while (len > 0) { 223413eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 223513eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 223613eb76e0Sbellard if (l > len) 223713eb76e0Sbellard l = len; 223813eb76e0Sbellard flags = page_get_flags(page); 223913eb76e0Sbellard if (!(flags & PAGE_VALID)) 2240a68fe89cSPaul Brook return -1; 224113eb76e0Sbellard if (is_write) { 224213eb76e0Sbellard if (!(flags & PAGE_WRITE)) 2243a68fe89cSPaul Brook return -1; 2244579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 224572fb7daaSaurel32 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) 2246a68fe89cSPaul Brook return -1; 224772fb7daaSaurel32 memcpy(p, buf, l); 224872fb7daaSaurel32 unlock_user(p, addr, l); 224913eb76e0Sbellard } else { 225013eb76e0Sbellard if (!(flags & PAGE_READ)) 2251a68fe89cSPaul Brook return -1; 2252579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 225372fb7daaSaurel32 if (!(p = lock_user(VERIFY_READ, addr, l, 1))) 2254a68fe89cSPaul Brook return -1; 225572fb7daaSaurel32 memcpy(buf, p, l); 22565b257578Saurel32 unlock_user(p, addr, 0); 225713eb76e0Sbellard } 225813eb76e0Sbellard len -= l; 225913eb76e0Sbellard buf += l; 226013eb76e0Sbellard addr += l; 226113eb76e0Sbellard } 2262a68fe89cSPaul Brook return 0; 226313eb76e0Sbellard } 22648df1cd07Sbellard 226513eb76e0Sbellard #else 226651d7a9ebSAnthony PERARD 2267a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr, 2268a8170e5eSAvi Kivity hwaddr length) 226951d7a9ebSAnthony PERARD { 2270f874bf90SPeter Maydell if (cpu_physical_memory_range_includes_clean(addr, length)) { 2271f874bf90SPeter Maydell tb_invalidate_phys_range(addr, addr + length, 0); 22726886867eSPaolo Bonzini cpu_physical_memory_set_dirty_range_nocode(addr, length); 227351d7a9ebSAnthony PERARD } 2274e226939dSAnthony PERARD xen_modified_memory(addr, length); 227551d7a9ebSAnthony PERARD } 227651d7a9ebSAnthony PERARD 227723326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 227882f2563fSPaolo Bonzini { 2279e1622f4bSPaolo Bonzini unsigned access_size_max = mr->ops->valid.max_access_size; 228023326164SRichard Henderson 228123326164SRichard Henderson 
/* Regions are assumed to support 1-4 byte accesses unless 228223326164SRichard Henderson otherwise specified. */ 228323326164SRichard Henderson if (access_size_max == 0) { 228423326164SRichard Henderson access_size_max = 4; 228582f2563fSPaolo Bonzini } 228623326164SRichard Henderson 228723326164SRichard Henderson /* Bound the maximum access by the alignment of the address. */ 228823326164SRichard Henderson if (!mr->ops->impl.unaligned) { 228923326164SRichard Henderson unsigned align_size_max = addr & -addr; 229023326164SRichard Henderson if (align_size_max != 0 && align_size_max < access_size_max) { 229123326164SRichard Henderson access_size_max = align_size_max; 229223326164SRichard Henderson } 229323326164SRichard Henderson } 229423326164SRichard Henderson 229523326164SRichard Henderson /* Don't attempt accesses larger than the maximum. */ 229623326164SRichard Henderson if (l > access_size_max) { 229723326164SRichard Henderson l = access_size_max; 229823326164SRichard Henderson } 2299098178f2SPaolo Bonzini if (l & (l - 1)) { 2300098178f2SPaolo Bonzini l = 1 << (qemu_fls(l) - 1); 2301098178f2SPaolo Bonzini } 230223326164SRichard Henderson 230323326164SRichard Henderson return l; 230482f2563fSPaolo Bonzini } 230582f2563fSPaolo Bonzini 2306fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, 2307ac1970fbSAvi Kivity int len, bool is_write) 230813eb76e0Sbellard { 2309149f54b5SPaolo Bonzini hwaddr l; 231013eb76e0Sbellard uint8_t *ptr; 2311791af8c8SPaolo Bonzini uint64_t val; 2312149f54b5SPaolo Bonzini hwaddr addr1; 23135c8a00ceSPaolo Bonzini MemoryRegion *mr; 2314fd8aaa76SPaolo Bonzini bool error = false; 231513eb76e0Sbellard 231613eb76e0Sbellard while (len > 0) { 231713eb76e0Sbellard l = len; 23185c8a00ceSPaolo Bonzini mr = address_space_translate(as, addr, &addr1, &l, is_write); 231913eb76e0Sbellard 232013eb76e0Sbellard if (is_write) { 23215c8a00ceSPaolo Bonzini if (!memory_access_is_direct(mr, is_write)) { 23225c8a00ceSPaolo Bonzini l = memory_access_size(mr, l, addr1); 23234917cf44SAndreas Färber /* XXX: could force current_cpu to NULL to avoid 23246a00d601Sbellard potential bugs */ 232523326164SRichard Henderson switch (l) { 232623326164SRichard Henderson case 8: 232723326164SRichard Henderson /* 64 bit write access */ 232823326164SRichard Henderson val = ldq_p(buf); 232923326164SRichard Henderson error |= io_mem_write(mr, addr1, val, 8); 233023326164SRichard Henderson break; 233123326164SRichard Henderson case 4: 23321c213d19Sbellard /* 32 bit write access */ 2333c27004ecSbellard val = ldl_p(buf); 23345c8a00ceSPaolo Bonzini error |= io_mem_write(mr, addr1, val, 4); 233523326164SRichard Henderson break; 233623326164SRichard Henderson case 2: 23371c213d19Sbellard /* 16 bit write access */ 2338c27004ecSbellard val = lduw_p(buf); 23395c8a00ceSPaolo Bonzini error |= io_mem_write(mr, addr1, val, 2); 234023326164SRichard Henderson break; 234123326164SRichard Henderson case 1: 23421c213d19Sbellard /* 8 bit write access */ 2343c27004ecSbellard val = ldub_p(buf); 23445c8a00ceSPaolo Bonzini error |= io_mem_write(mr, addr1, val, 1); 234523326164SRichard Henderson break; 234623326164SRichard Henderson default: 234723326164SRichard Henderson abort(); 234813eb76e0Sbellard } 23492bbfa05dSPaolo Bonzini } else { 23505c8a00ceSPaolo Bonzini addr1 += memory_region_get_ram_addr(mr); 235113eb76e0Sbellard /* RAM case */ 23525579c7f3Spbrook ptr = qemu_get_ram_ptr(addr1); 235313eb76e0Sbellard memcpy(ptr, buf, l); 235451d7a9ebSAnthony PERARD invalidate_and_set_dirty(addr1, l); 
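                /* The bytes went straight into guest RAM, so the call just
                 * above also invalidated any TBs translated from them and
                 * updated the dirty bitmaps accordingly. */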
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
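/*
 * Usage sketch (illustrative; the address and buffer contents are
 * hypothetical): reading and writing guest-physical memory through the
 * wrappers above. A true return value means some part of the access hit
 * an I/O region that reported an error.
 */
#if 0
static void example_copy_guest_phys(void)
{
    uint8_t data[16] = { 0 };
    hwaddr gpa = 0x100000;                    /* hypothetical guest address */

    if (address_space_read(&address_space_memory, gpa, data, sizeof(data))) {
        /* part of the range was not backed by usable memory */
        return;
    }
    data[0] ^= 0xff;
    address_space_write(&address_space_memory, gpa, data, sizeof(data));
}
#endif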
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
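/*
 * Usage sketch (illustrative; the blob and load address are hypothetical):
 * a loader copies an image into guest memory, bypassing ROM write
 * protection, then flushes the host instruction cache so KVM/Xen guests
 * execute the new code rather than stale cache contents.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    hwaddr load_addr = 0xfffc0000;            /* hypothetical ROM address */

    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  blob, size);
    cpu_flush_icache_range(load_addr, size);
}
#endif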
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
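/*
 * Usage sketch (illustrative; the device type and fields are hypothetical):
 * a device model whose address_space_map() attempt failed because the single
 * bounce buffer was busy registers a map client; the callback runs once the
 * buffer is released, and the client is unregistered automatically after it
 * fires, so each registration yields exactly one retry.
 */
#if 0
typedef struct ExampleDMA {                   /* hypothetical device state */
    hwaddr addr;
    hwaddr len;
} ExampleDMA;

static void example_retry_map(void *opaque)
{
    ExampleDMA *dma = opaque;
    hwaddr plen = dma->len;
    void *p = address_space_map(&address_space_memory, dma->addr, &plen, true);
    /* ... perform the transfer, then address_space_unmap() ... */
}

static void example_start_dma(ExampleDMA *dma)
{
    hwaddr plen = dma->len;
    void *p = address_space_map(&address_space_memory, dma->addr, &plen, true);
    if (!p) {
        /* bounce buffer busy: ask to be called back when it is released */
        cpu_register_map_client(dma, example_retry_map);
    }
}
#endif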
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
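/*
 * Usage sketch (illustrative; address and length are hypothetical): probe
 * whether an entire guest-physical range accepts writes before committing
 * to a multi-part transfer that would be awkward to unwind halfway through.
 */
#if 0
static bool example_range_writable(hwaddr addr, int len)
{
    return address_space_access_valid(&address_space_memory, addr, len, true);
}
#endif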
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
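/*
 * Usage sketch (illustrative; the transfer itself is hypothetical): the
 * canonical map/use/unmap pattern for zero-copy DMA. The mapped length can
 * come back smaller than requested, for example when the range spans two
 * RAM blocks or falls back to the bounce buffer, so callers must loop.
 */
#if 0
static void example_dma_write(hwaddr addr, const uint8_t *src, hwaddr size)
{
    while (size > 0) {
        hwaddr plen = size;
        void *host = cpu_physical_memory_map(addr, &plen, 1);
        if (!host) {
            /* resources exhausted (e.g. bounce buffer busy): retry later */
            return;
        }
        memcpy(host, src, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        src += plen;
        size -= plen;
    }
}
#endif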
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
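/*
 * Usage sketch (illustrative; the descriptor layout is hypothetical): a
 * device model reading a 32-bit little-endian field of an in-memory
 * descriptor uses the explicitly-sized, explicitly-endian accessor so the
 * result does not depend on the target's native byte order.
 */
#if 0
static uint32_t example_read_desc_flags(hwaddr desc_base)
{
    /* hypothetical layout: 32-bit LE flags word at offset 8 */
    return ldl_le_phys(&address_space_memory, desc_base + 8);
}
#endif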
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
    }
}
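/*
 * Usage sketch (illustrative; the PTE address and bit position are
 * hypothetical): a target MMU helper setting the "accessed" bit in a guest
 * page table entry uses the _notdirty variant so that this hardware-style
 * PTE update does not mark the page dirty or invalidate translated code,
 * exactly the use case the comment above describes.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);
    stl_phys_notdirty(as, pte_addr, pte | 0x20);  /* hypothetical A bit */
}
#endif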
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
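/*
 * Usage sketch (illustrative; the address is hypothetical): the three stq
 * variants differ only in byte order. stq_phys() stores in the target's
 * native order (via tswap64()), while the _le/_be variants fix the order
 * regardless of the target, which is what device models usually want.
 */
#if 0
static void example_store_u64(void)
{
    hwaddr gpa = 0x2000;                      /* hypothetical guest address */
    stq_le_phys(&address_space_memory, gpa, 0x1122334455667788ULL);
}
#endif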
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
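/*
 * Usage sketch (illustrative; the virtual address is hypothetical): this is
 * the kind of accessor a debugger front end uses to peek at guest *virtual*
 * memory; it walks the guest page tables via cpu_get_phys_page_debug() one
 * page at a time, so the range may span multiple mappings.
 */
#if 0
static int example_peek_guest_virtual(CPUState *cpu, target_ulong va,
                                      uint8_t *out, int len)
{
    return cpu_memory_rw_debug(cpu, va, out, len, 0);
}
#endif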
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);
    }
    rcu_read_unlock();
}
#endif
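/*
 * Usage sketch (illustrative; the callback merely counts bytes, and the
 * RAMBlockIterFunc signature is assumed to match the call above): iterate
 * over every RAM block under the RCU read lock taken by the helper, e.g.
 * to total up the used RAM in the machine.
 */
#if 0
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;
    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;
    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}
#endif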