/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip down to the next node, in units of
     * P_L2_SIZE entries.  0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

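/* The dispatch map below is a radix tree over the guest physical address
 * space: each interior node has P_L2_SIZE entries and a lookup consumes
 * P_L2_BITS address bits per step.  With ADDR_SPACE_BITS == 64 that gives
 * P_L2_LEVELS levels, e.g. six when TARGET_PAGE_BITS is 12.  A leaf
 * entry's 'ptr' indexes the sections[] table; an interior entry's 'ptr'
 * indexes nodes[], with 'skip' saying how many levels that one step
 * descends once phys_page_compact() has folded single-child chains.
 */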
#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

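/* Allocate a fresh interior node.  Every entry starts out as an empty
 * subtree one level down (skip = 1, ptr = NIL); phys_page_set_level()
 * later turns entries into leaves or deeper subtrees as pages are
 * registered.
 */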
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
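    /* Scan the children: count how many are populated, remember the last
     * one seen, and recurse into interior children first so each subtree
     * is already compacted before we consider merging it.
     */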
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

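/* Walk the radix tree for addr.  'i' counts the remaining levels and
 * drops by lp.skip on each step, so a compacted single-child chain is
 * crossed in one iteration.  Because compaction merges nodes, the leaf
 * that is reached may not actually cover addr; the range check at the
 * end catches that case and falls back to the unassigned section.
 */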
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

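/* Translate addr within as, following IOMMUs.  Each iteration looks the
 * address up in the current address space's dispatch tree; if the region
 * found is an IOMMU, its translation is applied and the walk continues
 * in iotlb.target_as.  The loop ends at the first non-IOMMU region, or
 * at io_mem_unassigned if the IOMMU denies the access.
 */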
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    rcu_read_lock();
    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    rcu_read_unlock();
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

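/* Migration of the state every CPUState has.  The callbacks and
 * descriptions below always send 'halted' and 'interrupt_request';
 * 'exception_index' travels in a subsection, so it is only put on the
 * wire when TCG is in use and an exception is actually pending.
 */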
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        }, {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

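/* Invalidate any translated code containing a breakpoint's pc.
 * User-mode can flush by virtual address directly; softmmu must first
 * resolve pc to a physical address with cpu_get_phys_page_debug() and
 * invalidate in the CPU's address space, skipping unmapped pages.
 */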
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)

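/* Map a ram_addr_t back to its RAMBlock.  The most recently used block
 * is cached in ram_list.mru_block, so the common case avoids walking
 * the RCU-protected block list; an address outside every block is a
 * fatal error.
 */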
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

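/* Build the value stored in a TLB entry's iotlb field.  For RAM it is
 * the page-aligned ram_addr_t of the page, OR'ed with a small special
 * section index (notdirty or ROM) in the low bits; for anything else
 * it is the section's index in the dispatch table plus the translated
 * offset.  Pages covered by a watchpoint are redirected to
 * PHYS_SECTION_WATCH and marked TLB_MMIO so accesses trap.  The packing
 * works because phys_section_add() guarantees fewer than
 * TARGET_PAGE_SIZE sections.
 */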
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

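/* Sections smaller than a target page cannot get a dispatch leaf of
 * their own.  Instead the page is covered by a subpage_t container
 * whose sub_section[] array maps each byte offset within the page to a
 * section index; register_subpage() installs or extends that container
 * for the page the section falls in.
 */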
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

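/* A section handed to the listener may start or end in the middle of a
 * page.  mem_add() therefore splits it three ways: an unaligned head
 * and tail go through register_subpage(), while the page-aligned middle
 * is registered with register_multipage() in one go.
 */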
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

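/* Back a RAMBlock with a file on a hugetlbfs mount: create a
 * qemu_back_mem.<region name>.XXXXXX file under path, unlink it right
 * away so it vanishes with the process, round the size up to a whole
 * number of huge pages, then ftruncate and mmap it (MAP_SHARED or
 * MAP_PRIVATE depending on the RAM_SHARED flag).
 */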
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

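/* Pick an offset in the ram_addr_t space for a new block: examine the
 * gap after every existing block and take the smallest gap that still
 * fits the requested size, i.e. a simple best-fit search that limits
 * fragmentation as blocks come and go.
 */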
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out the same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}
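/* find_ram_offset() above is a best-fit search: for every existing block
 * it measures the gap between that block's end and the closest block that
 * starts after it, then keeps the smallest gap still large enough.  The
 * same idea over a plain array of extents, a hypothetical stand-in for
 * the RAMBlock list (O(n^2), exactly like the code above): */
#if 0   /* illustration only, not compiled */
typedef struct { uint64_t offset, len; } Extent;

static uint64_t best_fit_offset(const Extent *e, int n, uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;

    for (int i = 0; i < n; i++) {
        uint64_t end = e[i].offset + e[i].len;
        uint64_t next = UINT64_MAX;

        for (int j = 0; j < n; j++) {       /* closest block after 'end' */
            if (e[j].offset >= end && e[j].offset < next) {
                next = e[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;    /* UINT64_MAX means no gap was found */
}
#endif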
/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
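/* The functions above show the read side of the RCU scheme used for
 * ram_list: take rcu_read_lock(), walk the list with QLIST_FOREACH_RCU,
 * and copy out whatever is needed before unlocking.  A condensed sketch
 * of the pattern (the aggregate computed here is illustrative): */
#if 0   /* illustration only, not compiled */
static ram_addr_t sum_used_lengths(void)
{
    RAMBlock *block;
    ram_addr_t total = 0;

    rcu_read_lock();                    /* pin the current list version */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        total += block->used_length;    /* copy data out while protected */
    }
    rcu_read_unlock();                  /* 'block' is dead past this point */
    return total;
}
#endif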
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
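/* A caller-side sketch of the resize contract above: a device supplies a
 * 'resized' hook matching the signature used by qemu_ram_alloc_resizeable()
 * below, and later grows the block within max_length.  The device hook and
 * its body are hypothetical. */
#if 0   /* illustration only, not compiled */
static void my_dev_ram_resized(const char *idstr, uint64_t length, void *host)
{
    /* e.g. update a device register that advertises the RAM size */
}

static void my_dev_grow_ram(ram_addr_t base, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, newsize, &err) < 0) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }
}
#endif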
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
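/* ram_block_add() publishes its list change and only then bumps
 * ram_list.version, with smp_wmb() in between, so a reader that observes
 * the new version is guaranteed to see the updated list.  A sketch of the
 * two sides of that ordering; the read side shown here is an assumption
 * about how the version is consumed (the real consumer lives in the
 * migration code, outside this file): */
#if 0   /* illustration only, not compiled */
/* writer, as in ram_block_add()/qemu_ram_free() */
static void publish_change(void)
{
    /* ...modify ram_list.blocks under the ramlist mutex... */
    smp_wmb();                  /* order: list contents before version */
    ram_list.version++;
}

/* assumed matching reader */
static void detect_change(unsigned int *cached_version)
{
    if (*cached_version != ram_list.version) {
        *cached_version = ram_list.version;
        smp_rmb();              /* order: version before re-reading list */
        /* ...rebuild any cached view of ram_list.blocks... */
    }
}
#endif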
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif
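/* A caller-side sketch of the function above: back a memory region with
 * a file on a hugetlbfs mount.  The mount point is an illustrative
 * assumption; error handling follows the Error ** convention used
 * throughout this file. */
#if 0   /* illustration only, not compiled */
static ram_addr_t alloc_hugepage_backed_ram(MemoryRegion *mr, ram_addr_t size)
{
    Error *err = NULL;
    ram_addr_t addr;

    addr = qemu_ram_alloc_from_file(size, mr, true /* share */,
                                    "/dev/hugepages", &err);
    if (err) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
        return -1;
    }
    return addr;
}
#endif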
static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
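/* qemu_ram_free() above is the full RCU write side: unlink under the
 * ramlist mutex, publish via smp_wmb()/version, then defer the actual
 * free with call_rcu() so readers still traversing the list never touch
 * freed memory.  A condensed rendering for a hypothetical node type: */
#if 0   /* illustration only, not compiled */
typedef struct Node {
    QLIST_ENTRY(Node) next;
    struct rcu_head rcu;
} Node;

static void node_reclaim(Node *n)
{
    g_free(n);                          /* runs after a grace period */
}

static void node_remove(Node *n)
{
    qemu_mutex_lock_ramlist();          /* writer-side mutual exclusion */
    QLIST_REMOVE_RCU(n, next);          /* readers may still hold 'n' */
    qemu_mutex_unlock_ramlist();
    call_rcu(n, node_reclaim, rcu);     /* free once all readers drain */
}
#endif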
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
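/* In qemu_ram_ptr_length() above, *size is an in/out parameter: it is
 * clamped to the bytes remaining in the containing block, so callers must
 * re-check it after the call.  A caller-side sketch of that contract
 * (the helper and its use are illustrative): */
#if 0   /* illustration only, not compiled */
static bool poke_ram(ram_addr_t addr, hwaddr want, uint8_t pattern)
{
    hwaddr avail = want;
    void *host = qemu_ram_ptr_length(addr, &avail);

    if (!host) {
        return false;               /* zero-length request */
    }
    memset(host, pattern, avail);   /* 'avail' may be < 'want' at block end */
    return avail == want;
}
#endif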
1777ae3a7047SMike Day */ 17781b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr) 17795579c7f3Spbrook { 178094a6b54fSpbrook RAMBlock *block; 178194a6b54fSpbrook uint8_t *host = ptr; 1782ae3a7047SMike Day MemoryRegion *mr; 178394a6b54fSpbrook 1784868bb33fSJan Kiszka if (xen_enabled()) { 17850dc3f44aSMike Day rcu_read_lock(); 1786e41d7c69SJan Kiszka *ram_addr = xen_ram_addr_from_mapcache(ptr); 1787ae3a7047SMike Day mr = qemu_get_ram_block(*ram_addr)->mr; 17880dc3f44aSMike Day rcu_read_unlock(); 1789ae3a7047SMike Day return mr; 1790712c2b41SStefano Stabellini } 1791712c2b41SStefano Stabellini 17920dc3f44aSMike Day rcu_read_lock(); 17930dc3f44aSMike Day block = atomic_rcu_read(&ram_list.mru_block); 17949b8424d5SMichael S. Tsirkin if (block && block->host && host - block->host < block->max_length) { 179523887b79SPaolo Bonzini goto found; 179623887b79SPaolo Bonzini } 179723887b79SPaolo Bonzini 17980dc3f44aSMike Day QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1799432d268cSJun Nakajima /* This case append when the block is not mapped. */ 1800432d268cSJun Nakajima if (block->host == NULL) { 1801432d268cSJun Nakajima continue; 1802432d268cSJun Nakajima } 18039b8424d5SMichael S. Tsirkin if (host - block->host < block->max_length) { 180423887b79SPaolo Bonzini goto found; 180594a6b54fSpbrook } 1806f471a17eSAlex Williamson } 1807432d268cSJun Nakajima 18080dc3f44aSMike Day rcu_read_unlock(); 18091b5ec234SPaolo Bonzini return NULL; 181023887b79SPaolo Bonzini 181123887b79SPaolo Bonzini found: 181223887b79SPaolo Bonzini *ram_addr = block->offset + (host - block->host); 1813ae3a7047SMike Day mr = block->mr; 18140dc3f44aSMike Day rcu_read_unlock(); 1815ae3a7047SMike Day return mr; 1816e890261fSMarcelo Tosatti } 1817f471a17eSAlex Williamson 1818a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr, 18190e0df1e2SAvi Kivity uint64_t val, unsigned size) 18201ccde1cbSbellard { 182152159192SJuan Quintela if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 18220e0df1e2SAvi Kivity tb_invalidate_phys_page_fast(ram_addr, size); 18233a7d929eSbellard } 18240e0df1e2SAvi Kivity switch (size) { 18250e0df1e2SAvi Kivity case 1: 18265579c7f3Spbrook stb_p(qemu_get_ram_ptr(ram_addr), val); 18270e0df1e2SAvi Kivity break; 18280e0df1e2SAvi Kivity case 2: 18295579c7f3Spbrook stw_p(qemu_get_ram_ptr(ram_addr), val); 18300e0df1e2SAvi Kivity break; 18310e0df1e2SAvi Kivity case 4: 18325579c7f3Spbrook stl_p(qemu_get_ram_ptr(ram_addr), val); 18330e0df1e2SAvi Kivity break; 18340e0df1e2SAvi Kivity default: 18350e0df1e2SAvi Kivity abort(); 18360e0df1e2SAvi Kivity } 18376886867eSPaolo Bonzini cpu_physical_memory_set_dirty_range_nocode(ram_addr, size); 1838f23db169Sbellard /* we remove the notdirty callback only if the code has been 1839f23db169Sbellard flushed */ 1840a2cd8c85SJuan Quintela if (!cpu_physical_memory_is_clean(ram_addr)) { 18414917cf44SAndreas Färber CPUArchState *env = current_cpu->env_ptr; 184293afeadeSAndreas Färber tlb_set_dirty(env, current_cpu->mem_io_vaddr); 18434917cf44SAndreas Färber } 18441ccde1cbSbellard } 18451ccde1cbSbellard 1846b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr, 1847b018ddf6SPaolo Bonzini unsigned size, bool is_write) 1848b018ddf6SPaolo Bonzini { 1849b018ddf6SPaolo Bonzini return is_write; 1850b018ddf6SPaolo Bonzini } 1851b018ddf6SPaolo Bonzini 18520e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = { 18530e0df1e2SAvi Kivity .write = notdirty_mem_write, 
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}
static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    case 8:
        return ldq_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
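/* A subpage container covers one guest page whose sub-ranges map to
 * different sections: subpage_init() fills the whole page with the
 * "unassigned" section and subpage_register() then overrides byte ranges
 * with section indices.  A sketch of carving one page into two halves;
 * the section indices are hypothetical values from phys_section_add(): */
#if 0   /* illustration only, not compiled */
static void split_page_example(AddressSpace *as, hwaddr page_base,
                               uint16_t lo_section, uint16_t hi_section)
{
    subpage_t *sp = subpage_init(as, page_base);

    /* first half of the page -> lo_section, second half -> hi_section */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, lo_section);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     hi_section);
}
#endif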
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
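/* mem_commit() above is the canonical RCU update for a structure reached
 * through a pointer: build the replacement off to the side, publish it
 * with atomic_rcu_set(), and reclaim the old one with call_rcu();
 * readers use atomic_rcu_read(), as iotlb_to_region() above does.  The
 * same pattern for a hypothetical refreshable table: */
#if 0   /* illustration only, not compiled */
typedef struct Table {
    struct rcu_head rcu;
    int entries[16];
} Table;

static Table *current_table;

static void table_free(Table *t)
{
    g_free(t);
}

static void table_replace(Table *next)
{
    Table *cur = current_table;

    atomic_rcu_set(&current_table, next);   /* new readers see 'next' */
    if (cur) {
        call_rcu(cur, table_free, rcu);     /* old readers drain first */
    }
}

static int table_read(int i)
{
    rcu_read_lock();
    Table *t = atomic_rcu_read(&current_table);
    int v = t->entries[i];
    rcu_read_unlock();
    return v;
}
#endif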
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
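/* address_space_init_dispatch() above shows the MemoryListener contract:
 * callbacks fire in a begin / region_add... / commit sequence whenever
 * the memory topology changes, with .priority ordering the listeners.
 * A sketch of a minimal observer; the callback body is hypothetical: */
#if 0   /* illustration only, not compiled */
static void log_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    fprintf(stderr, "mapped %s\n", memory_region_name(section->mr));
}

static MemoryListener log_listener = {
    .region_add = log_region_add,
    .priority = 10,          /* run after the dispatch listener (0) */
};

/* registered once against one address space:
 *     memory_listener_register(&log_listener, &address_space_memory);
 */
#endif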
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_range_includes_clean(addr, length)) {
        tb_invalidate_phys_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    }
    xen_modified_memory(addr, length);
}
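/* cpu_memory_rw_debug() above (and address_space_rw() below) use the
 * standard page-chunking step: the amount handled in one iteration is
 * the distance to the next page boundary, capped by the remaining
 * length.  Isolated as a helper for illustration: */
#if 0   /* illustration only, not compiled */
/* With TARGET_PAGE_SIZE 4096 and addr 0x1ff0, the first chunk is 0x10
 * bytes; subsequent chunks are whole pages. */
static int chunk_for(target_ulong addr, int len)
{
    target_ulong page = addr & TARGET_PAGE_MASK;
    int l = (page + TARGET_PAGE_SIZE) - addr;   /* bytes to page end */

    return l > len ? len : l;
}
#endif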
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
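/* Worked examples of the bit tricks in memory_access_size() above:
 *
 *   addr & -addr   : isolates the lowest set bit, i.e. the largest power
 *                    of two dividing addr
 *       addr = 0x1006 -> 0x2   (a 2-byte access is the widest aligned one)
 *       addr = 0x1008 -> 0x8
 *       addr = 0x0    -> 0x0   (hence the align_size_max != 0 check)
 *
 *   l & (l - 1)    : non-zero iff l is not a power of two
 *       l = 6 -> 6 & 5 = 4 != 0, so round down to a power of two:
 *       1 << (qemu_fls(6) - 1) = 1 << 2 = 4
 */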

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
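
/* Illustrative sketch (not part of the original file): how the wrappers
 * above are typically consumed.  example_copy_guest_bytes() is a
 * hypothetical helper that copies guest-physical memory through a bounce
 * array, accumulating the error flag that address_space_rw() returns when
 * an I/O access fails. */
static bool example_copy_guest_bytes(AddressSpace *as, hwaddr src,
                                     hwaddr dst, int len)
{
    uint8_t tmp[64];
    bool error = false;

    while (len > 0 && !error) {
        int chunk = MIN(len, (int)sizeof(tmp));

        error |= address_space_read(as, src, tmp, chunk);
        error |= address_space_write(as, dst, tmp, chunk);
        src += chunk;
        dst += chunk;
        len -= chunk;
    }
    return error;
}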

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
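
/* Illustrative sketch (not part of the original file): the firmware-loading
 * sequence a board model might use with the two functions above.  The blob
 * parameters are hypothetical; the point is that ROM contents are written
 * with cpu_physical_memory_write_rom(), and that the explicit icache flush
 * only matters for KVM/Xen, as the comment in cpu_flush_icache_range()
 * notes. */
static void example_load_firmware(AddressSpace *as, hwaddr base,
                                  const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(as, base, blob, size);
    cpu_flush_icache_range(base, size);
}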

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
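
/* Illustrative sketch (not part of the original file): probing a
 * guest-supplied range with address_space_access_valid() before committing
 * to it, as a device model might do for a hypothetical 16-byte-per-entry
 * descriptor ring that is both read and written. */
static bool example_ring_is_accessible(AddressSpace *as, hwaddr ring,
                                       int nentries)
{
    return address_space_access_valid(as, ring, nentries * 16, false)
        && address_space_access_valid(as, ring, nentries * 16, true);
}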

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
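
/* Illustrative sketch (not part of the original file): the canonical
 * map/process/unmap pattern for address_space_map().  The hypothetical
 * caller must cope with *plen shrinking (the mapping stopped at a
 * MemoryRegion boundary) and with NULL (the single bounce buffer is in
 * use; a real caller would register a map client and retry). */
static void example_process_region(AddressSpace *as, hwaddr addr, hwaddr len,
                                   void (*process)(void *host, hwaddr len))
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = address_space_map(as, addr, &plen, true);

        if (!host) {
            return; /* resources exhausted */
        }
        process(host, plen);
        /* is_write == 1 marks the touched pages dirty on unmap. */
        address_space_unmap(as, host, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}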

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
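
/* Illustrative sketch (not part of the original file): why the _le/_be
 * variants above exist.  A hypothetical PCI-style device keeps its
 * registers little-endian regardless of guest CPU endianness, so its model
 * reads them with ldl_le_phys() and gets the same value on every host; the
 * 0x4 STATUS offset is invented for the example. */
static uint32_t example_read_pci_status(AddressSpace *as, hwaddr regs_base)
{
    return ldl_le_phys(as, regs_base + 0x4);
}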

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
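
/* Illustrative sketch (not part of the original file): the intended use of
 * stl_phys_notdirty() versus stl_phys().  A hypothetical target MMU helper
 * setting accessed/dirty flags in a guest PTE uses the _notdirty variant so
 * the PTE update itself neither marks the page dirty nor invalidates
 * translated code, exactly as the comment above stl_phys_notdirty()
 * describes. */
static void example_set_pte_flags(AddressSpace *as, hwaddr pte_addr,
                                  uint32_t pte, uint32_t flags)
{
    stl_phys_notdirty(as, pte_addr, pte | flags);
}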

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);
    }
    rcu_read_unlock();
}
#endif
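
#ifndef CONFIG_USER_ONLY
/* Illustrative sketch (not part of the original file): a RAMBlockIterFunc
 * for qemu_ram_foreach_block() above.  The iteration runs under
 * rcu_read_lock(), so the callback must not block; this hypothetical pair
 * simply totals the used length of every registered RAM block. */
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram_size(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}
#endif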