/* xref: /qemu/system/physmem.c (revision c8d6f66ae7d0ce5f3622c19e29a2333d28dc1e9a) */
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip down to the next node (each level covers
     * P_L2_BITS of the index).  0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

#define ADDR_SPACE_BITS 64

/* Size of the L2 (and L3, etc.) page tables.  */
#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
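
/*
 * Worked example (illustrative; assumes TARGET_PAGE_BITS == 12, i.e.
 * 4 KiB target pages): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so six
 * 9-bit levels cover the 52 bits of page index.  A walk extracts the
 * slot for level i as
 *
 *     slot = (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1);
 *
 * and a node's skip field lets one hop stand in for several such levels.
 */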

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
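
/*
 * A subpage stands in for a single target page whose contents are split
 * across several MemoryRegionSections.  Each byte offset within the page
 * maps through sub_section[] to a section index, and SUBPAGE_IDX()
 * extracts that offset.  Sketch (assuming 4 KiB pages): a device mapped
 * at bytes 0x100..0x1ff of such a page fills sub_section[0x100] through
 * sub_section[0x1ff] with its section index, while the remaining slots
 * keep PHYS_SECTION_UNASSIGNED.
 */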

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

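/*
 * Usage sketch (illustrative values, not from this file): mapping sixteen
 * consecutive pages starting at page index 0x1000 to section 5 is one call:
 *
 *     phys_page_set(d, 0x1000, 16, 5);
 *
 * The recursion in phys_page_set_level() peels P_L2_BITS of the index per
 * level and writes a leaf directly at any level where the remaining range
 * is step-aligned and at least one full step long.
 */
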
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

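/*
 * Compaction sketch: a chain of interior nodes that each have a single
 * child, e.g.
 *
 *     root(skip=1) -> A(skip=1) -> B(skip=1) -> leaf node
 *
 * collapses bottom-up: each parent inherits its only child's pointer and
 * the skips add, so root ends up pointing at the surviving node with
 * skip = 3, and phys_page_find() consumes three levels' worth of index
 * bits in a single hop.
 */
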
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
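
/*
 * The same predicate in table form: direct (fast-path) access is allowed
 * for
 *
 *     RAM   read  -> yes        RAM   write -> yes, unless readonly
 *     ROMD  read  -> yes        ROMD  write -> no (goes to MMIO ops)
 *     other read  -> no         other write -> no
 */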

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

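/*
 * Usage sketch (illustrative, not from this file): translating a 4-byte
 * read at guest physical address 0x1000 in the global memory space:
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                0x1000, &xlat, &plen, false);
 *
 * On return, mr is the terminal (non-IOMMU) region, xlat is the offset
 * into it, and plen may have shrunk if the access crosses a section or
 * IOMMU boundary.
 */
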
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        }, {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

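/*
 * Worked example of the wraparound case: a 4-byte watchpoint at
 * vaddr = ~(vaddr)3 (the last four bytes of the address space) gives
 * wpend = ~(vaddr)0 without overflowing, so a 2-byte access at
 * addr = ~(vaddr)1 (addrend = ~(vaddr)0) correctly matches, whereas the
 * naive test "addr < wp->vaddr + wp->len" would wrap to 0 and miss it.
 */
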
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
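
/*
 * Encoding sketch: for RAM the returned value is the page-aligned
 * ram_addr_t ORed with a small section index in the low bits, e.g. a
 * writable RAM page whose backing ram address is 0x3000 yields
 * 0x3000 | PHYS_SECTION_NOTDIRTY == 0x3001.  phys_section_add() asserts
 * sections_nb < TARGET_PAGE_SIZE precisely so these indexes never spill
 * into the page-aligned part.
 */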
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

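/*
 * Splitting sketch (illustrative; assumes 4 KiB pages): a section at
 * address 0x0800 of size 0x2400 is registered in three steps:
 *
 *     [0x0800, 0x0fff]  unaligned head  -> register_subpage()
 *     [0x1000, 0x1fff]  one full page   -> register_multipage()
 *     [0x2000, 0x2bff]  partial tail    -> register_subpage()
 */
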
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

1063fc7a5800SHu Tao static long gethugepagesize(const char *path, Error **errp)
1064c902760fSMarcelo Tosatti {
1065c902760fSMarcelo Tosatti     struct statfs fs;
1066c902760fSMarcelo Tosatti     int ret;
1067c902760fSMarcelo Tosatti 
1068c902760fSMarcelo Tosatti     do {
1069c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
1070c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
1071c902760fSMarcelo Tosatti 
1072c902760fSMarcelo Tosatti     if (ret != 0) {
1073fc7a5800SHu Tao         error_setg_errno(errp, errno, "failed to get page size of file %s",
1074fc7a5800SHu Tao                          path);
1075c902760fSMarcelo Tosatti         return 0;
1076c902760fSMarcelo Tosatti     }
1077c902760fSMarcelo Tosatti 
1078c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
1079c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1080c902760fSMarcelo Tosatti 
1081c902760fSMarcelo Tosatti     return fs.f_bsize;
1082c902760fSMarcelo Tosatti }
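
/*
 * Example (values illustrative): for a path mounted as hugetlbfs with the
 * common 2 MiB huge page size, statfs() reports f_type == HUGETLBFS_MAGIC
 * and f_bsize == 0x200000, which later becomes the block alignment.
 */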
1083c902760fSMarcelo Tosatti 
108404b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
108504b16653SAlex Williamson                             ram_addr_t memory,
10867f56e740SPaolo Bonzini                             const char *path,
10877f56e740SPaolo Bonzini                             Error **errp)
1088c902760fSMarcelo Tosatti {
1089c902760fSMarcelo Tosatti     char *filename;
10908ca761f6SPeter Feiner     char *sanitized_name;
10918ca761f6SPeter Feiner     char *c;
1092557529ddSHu Tao     void *area = NULL;
1093c902760fSMarcelo Tosatti     int fd;
1094557529ddSHu Tao     uint64_t hpagesize;
1095fc7a5800SHu Tao     Error *local_err = NULL;
1096c902760fSMarcelo Tosatti 
1097fc7a5800SHu Tao     hpagesize = gethugepagesize(path, &local_err);
1098fc7a5800SHu Tao     if (local_err) {
1099fc7a5800SHu Tao         error_propagate(errp, local_err);
1100f9a49dfaSMarcelo Tosatti         goto error;
1101c902760fSMarcelo Tosatti     }
1102a2b257d6SIgor Mammedov     block->mr->align = hpagesize;
1103c902760fSMarcelo Tosatti 
1104c902760fSMarcelo Tosatti     if (memory < hpagesize) {
1105557529ddSHu Tao         error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1106557529ddSHu Tao                    "or larger than huge page size 0x%" PRIx64,
1107557529ddSHu Tao                    memory, hpagesize);
1108557529ddSHu Tao         goto error;
1109c902760fSMarcelo Tosatti     }
1110c902760fSMarcelo Tosatti 
1111c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
11127f56e740SPaolo Bonzini         error_setg(errp,
11137f56e740SPaolo Bonzini                    "host lacks kvm mmu notifiers, -mem-path unsupported");
1114f9a49dfaSMarcelo Tosatti         goto error;
1115c902760fSMarcelo Tosatti     }
1116c902760fSMarcelo Tosatti 
11178ca761f6SPeter Feiner     /* Make name safe to use with mkstemp by replacing '/' with '_'. */
111883234bf2SPeter Crosthwaite     sanitized_name = g_strdup(memory_region_name(block->mr));
11198ca761f6SPeter Feiner     for (c = sanitized_name; *c != '\0'; c++) {
11208ca761f6SPeter Feiner         if (*c == '/')
11218ca761f6SPeter Feiner             *c = '_';
11228ca761f6SPeter Feiner     }
11238ca761f6SPeter Feiner 
11248ca761f6SPeter Feiner     filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
11258ca761f6SPeter Feiner                                sanitized_name);
11268ca761f6SPeter Feiner     g_free(sanitized_name);
1127c902760fSMarcelo Tosatti 
1128c902760fSMarcelo Tosatti     fd = mkstemp(filename);
1129c902760fSMarcelo Tosatti     if (fd < 0) {
11307f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
11317f56e740SPaolo Bonzini                          "unable to create backing store for hugepages");
1132e4ada482SStefan Weil         g_free(filename);
1133f9a49dfaSMarcelo Tosatti         goto error;
1134c902760fSMarcelo Tosatti     }
1135c902760fSMarcelo Tosatti     unlink(filename);
1136e4ada482SStefan Weil     g_free(filename);
1137c902760fSMarcelo Tosatti 
1138c902760fSMarcelo Tosatti     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
1139c902760fSMarcelo Tosatti 
1140c902760fSMarcelo Tosatti     /*
1141c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
1142c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
1143c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
1144c902760fSMarcelo Tosatti      * mmap will fail.
1145c902760fSMarcelo Tosatti      */
11467f56e740SPaolo Bonzini     if (ftruncate(fd, memory)) {
1147c902760fSMarcelo Tosatti         perror("ftruncate");
11487f56e740SPaolo Bonzini     }
1149c902760fSMarcelo Tosatti 
1150dbcb8981SPaolo Bonzini     area = mmap(0, memory, PROT_READ | PROT_WRITE,
1151dbcb8981SPaolo Bonzini                 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1152dbcb8981SPaolo Bonzini                 fd, 0);
1153c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
11547f56e740SPaolo Bonzini         error_setg_errno(errp, errno,
11557f56e740SPaolo Bonzini                          "unable to map backing store for hugepages");
1156c902760fSMarcelo Tosatti         close(fd);
1157f9a49dfaSMarcelo Tosatti         goto error;
1158c902760fSMarcelo Tosatti     }
1159ef36fa14SMarcelo Tosatti 
1160ef36fa14SMarcelo Tosatti     if (mem_prealloc) {
116138183310SPaolo Bonzini         os_mem_prealloc(fd, area, memory);
1162ef36fa14SMarcelo Tosatti     }
1163ef36fa14SMarcelo Tosatti 
116404b16653SAlex Williamson     block->fd = fd;
1165c902760fSMarcelo Tosatti     return area;
1166f9a49dfaSMarcelo Tosatti 
1167f9a49dfaSMarcelo Tosatti error:
1168f9a49dfaSMarcelo Tosatti     if (mem_prealloc) {
1169e4d9df4fSLuiz Capitulino         error_report("%s", error_get_pretty(*errp));
1170f9a49dfaSMarcelo Tosatti         exit(1);
1171f9a49dfaSMarcelo Tosatti     }
1172f9a49dfaSMarcelo Tosatti     return NULL;
1173c902760fSMarcelo Tosatti }
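
/*
 * Sketch of the resulting backing file (suffix hypothetical): for a memory
 * region named "pc.ram" and -mem-path /dev/hugepages, mkstemp() creates
 * something like /dev/hugepages/qemu_back_mem.pc.ram.Gf1Q3z, which is
 * unlinked right away so only the open file descriptor keeps the hugepage
 * backing alive.
 */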
1174c902760fSMarcelo Tosatti #endif
1175c902760fSMarcelo Tosatti 
1176d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1177d17b5288SAlex Williamson {
117804b16653SAlex Williamson     RAMBlock *block, *next_block;
11793e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
118004b16653SAlex Williamson 
118249cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out the same offset multiple times */
118249cd9ac6SStefan Hajnoczi 
1183a3161038SPaolo Bonzini     if (QTAILQ_EMPTY(&ram_list.blocks))
118404b16653SAlex Williamson         return 0;
118504b16653SAlex Williamson 
1186a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1187f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
118804b16653SAlex Williamson 
118904b16653SAlex Williamson         end = block->offset + block->length;
119004b16653SAlex Williamson 
1191a3161038SPaolo Bonzini         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
119204b16653SAlex Williamson             if (next_block->offset >= end) {
119304b16653SAlex Williamson                 next = MIN(next, next_block->offset);
119404b16653SAlex Williamson             }
119504b16653SAlex Williamson         }
119604b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
119704b16653SAlex Williamson             offset = end;
119804b16653SAlex Williamson             mingap = next - end;
119904b16653SAlex Williamson         }
120004b16653SAlex Williamson     }
12013e837b2cSAlex Williamson 
12023e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
12033e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
12043e837b2cSAlex Williamson                 (uint64_t)size);
12053e837b2cSAlex Williamson         abort();
12063e837b2cSAlex Williamson     }
12073e837b2cSAlex Williamson 
120804b16653SAlex Williamson     return offset;
120904b16653SAlex Williamson }
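
/*
 * Worked example (layout hypothetical): with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for size 0x1000 sees a 0x2000-byte gap after
 * the first block and an unbounded tail after the second; the smaller
 * candidate gap wins, so the function returns 0x1000.
 */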
121004b16653SAlex Williamson 
1211652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
121204b16653SAlex Williamson {
1213d17b5288SAlex Williamson     RAMBlock *block;
1214d17b5288SAlex Williamson     ram_addr_t last = 0;
1215d17b5288SAlex Williamson 
1216a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1217d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
1218d17b5288SAlex Williamson 
1219d17b5288SAlex Williamson     return last;
1220d17b5288SAlex Williamson }
1221d17b5288SAlex Williamson 
1222ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1223ddb97f1dSJason Baron {
1224ddb97f1dSJason Baron     int ret;
1225ddb97f1dSJason Baron 
1226ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
12272ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
12282ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1229ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1230ddb97f1dSJason Baron         if (ret) {
1231ddb97f1dSJason Baron             perror("qemu_madvise");
1232ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1233ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1234ddb97f1dSJason Baron         }
1235ddb97f1dSJason Baron     }
1236ddb97f1dSJason Baron }
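
/*
 * The knob queried above is a machine option, e.g. (illustrative):
 *
 *     qemu-system-x86_64 -machine pc,dump-guest-core=off ...
 *
 * with which every RAM block gets QEMU_MADV_DONTDUMP and guest memory is
 * left out of QEMU core dumps.
 */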
1237ddb97f1dSJason Baron 
123820cfe881SHu Tao static RAMBlock *find_ram_block(ram_addr_t addr)
123984b89d78SCam Macdonell {
124020cfe881SHu Tao     RAMBlock *block;
124184b89d78SCam Macdonell 
1242a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1243c5705a77SAvi Kivity         if (block->offset == addr) {
124420cfe881SHu Tao             return block;
1245c5705a77SAvi Kivity         }
1246c5705a77SAvi Kivity     }
124720cfe881SHu Tao 
124820cfe881SHu Tao     return NULL;
124920cfe881SHu Tao }
125020cfe881SHu Tao 
125120cfe881SHu Tao void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
125220cfe881SHu Tao {
125320cfe881SHu Tao     RAMBlock *new_block = find_ram_block(addr);
125420cfe881SHu Tao     RAMBlock *block;
125520cfe881SHu Tao 
1256c5705a77SAvi Kivity     assert(new_block);
1257c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
125884b89d78SCam Macdonell 
125909e5ab63SAnthony Liguori     if (dev) {
126009e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
126184b89d78SCam Macdonell         if (id) {
126284b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
12637267c094SAnthony Liguori             g_free(id);
126484b89d78SCam Macdonell         }
126584b89d78SCam Macdonell     }
126684b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
126784b89d78SCam Macdonell 
1268b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1269b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1270a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1271c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
127284b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
127384b89d78SCam Macdonell                     new_block->idstr);
127484b89d78SCam Macdonell             abort();
127584b89d78SCam Macdonell         }
127684b89d78SCam Macdonell     }
1277b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1278c5705a77SAvi Kivity }
1279c5705a77SAvi Kivity 
128020cfe881SHu Tao void qemu_ram_unset_idstr(ram_addr_t addr)
128120cfe881SHu Tao {
128220cfe881SHu Tao     RAMBlock *block = find_ram_block(addr);
128320cfe881SHu Tao 
128420cfe881SHu Tao     if (block) {
128520cfe881SHu Tao         memset(block->idstr, 0, sizeof(block->idstr));
128620cfe881SHu Tao     }
128720cfe881SHu Tao }
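
/*
 * Example idstr composition (device path hypothetical): with a qdev path
 * of "/machine/unattached/device[23]" and name "pc.ram",
 * qemu_ram_set_idstr() yields "/machine/unattached/device[23]/pc.ram";
 * without a device, the idstr is just "pc.ram".
 */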
128820cfe881SHu Tao 
12898490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
12908490fc78SLuiz Capitulino {
12912ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
12928490fc78SLuiz Capitulino         /* disabled by the user */
12938490fc78SLuiz Capitulino         return 0;
12948490fc78SLuiz Capitulino     }
12958490fc78SLuiz Capitulino 
12968490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
12978490fc78SLuiz Capitulino }
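
/*
 * The corresponding switch is the mem-merge machine option, e.g.
 * (illustrative):
 *
 *     qemu-system-x86_64 -machine pc,mem-merge=off ...
 *
 * which skips the QEMU_MADV_MERGEABLE hint and keeps guest RAM out of the
 * host kernel's samepage merging (KSM).
 */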
12988490fc78SLuiz Capitulino 
1299ef701d7bSHu Tao static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1300c5705a77SAvi Kivity {
1301e1c57ab8SPaolo Bonzini     RAMBlock *block;
13022152f5caSJuan Quintela     ram_addr_t old_ram_size, new_ram_size;
13032152f5caSJuan Quintela 
13042152f5caSJuan Quintela     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1305c5705a77SAvi Kivity 
1306b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1307b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1308e1c57ab8SPaolo Bonzini     new_block->offset = find_ram_offset(new_block->length);
1309e1c57ab8SPaolo Bonzini 
13100628c182SMarkus Armbruster     if (!new_block->host) {
1311e1c57ab8SPaolo Bonzini         if (xen_enabled()) {
1312e1c57ab8SPaolo Bonzini             xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1313e1c57ab8SPaolo Bonzini         } else {
1314a2b257d6SIgor Mammedov             new_block->host = phys_mem_alloc(new_block->length,
1315a2b257d6SIgor Mammedov                                              &new_block->mr->align);
131639228250SMarkus Armbruster             if (!new_block->host) {
1317ef701d7bSHu Tao                 error_setg_errno(errp, errno,
1318ef701d7bSHu Tao                                  "cannot set up guest memory '%s'",
1319ef701d7bSHu Tao                                  memory_region_name(new_block->mr));
1320ef701d7bSHu Tao                 qemu_mutex_unlock_ramlist();
1321ef701d7bSHu Tao                 return -1;
132239228250SMarkus Armbruster             }
1323e1c57ab8SPaolo Bonzini             memory_try_enable_merging(new_block->host, new_block->length);
1324c902760fSMarcelo Tosatti         }
13256977dfe6SYoshiaki Tamura     }
132694a6b54fSpbrook 
1327abb26d63SPaolo Bonzini     /* Keep the list sorted from biggest to smallest block.  */
1328abb26d63SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1329abb26d63SPaolo Bonzini         if (block->length < new_block->length) {
1330abb26d63SPaolo Bonzini             break;
1331abb26d63SPaolo Bonzini         }
1332abb26d63SPaolo Bonzini     }
1333abb26d63SPaolo Bonzini     if (block) {
1334abb26d63SPaolo Bonzini         QTAILQ_INSERT_BEFORE(block, new_block, next);
1335abb26d63SPaolo Bonzini     } else {
1336abb26d63SPaolo Bonzini         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1337abb26d63SPaolo Bonzini     }
13380d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
133994a6b54fSpbrook 
1340f798b07fSUmesh Deshpande     ram_list.version++;
1341b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1342f798b07fSUmesh Deshpande 
13432152f5caSJuan Quintela     new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
13442152f5caSJuan Quintela 
13452152f5caSJuan Quintela     if (new_ram_size > old_ram_size) {
13461ab4c8ceSJuan Quintela         int i;
13471ab4c8ceSJuan Quintela         for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
13481ab4c8ceSJuan Quintela             ram_list.dirty_memory[i] =
13491ab4c8ceSJuan Quintela                 bitmap_zero_extend(ram_list.dirty_memory[i],
13501ab4c8ceSJuan Quintela                                    old_ram_size, new_ram_size);
13511ab4c8ceSJuan Quintela        }
13522152f5caSJuan Quintela     }
1353e1c57ab8SPaolo Bonzini     cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
135494a6b54fSpbrook 
1355e1c57ab8SPaolo Bonzini     qemu_ram_setup_dump(new_block->host, new_block->length);
1356e1c57ab8SPaolo Bonzini     qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1357e1c57ab8SPaolo Bonzini     qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
1358ddb97f1dSJason Baron 
1359e1c57ab8SPaolo Bonzini     if (kvm_enabled()) {
1360e1c57ab8SPaolo Bonzini         kvm_setup_guest_memory(new_block->host, new_block->length);
1361e1c57ab8SPaolo Bonzini     }
13626f0437e8SJan Kiszka 
136394a6b54fSpbrook     return new_block->offset;
136494a6b54fSpbrook }
1365e9a1ab19Sbellard 
13660b183fc8SPaolo Bonzini #ifdef __linux__
1367e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1368dbcb8981SPaolo Bonzini                                     bool share, const char *mem_path,
13697f56e740SPaolo Bonzini                                     Error **errp)
1370e1c57ab8SPaolo Bonzini {
1371e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1372ef701d7bSHu Tao     ram_addr_t addr;
1373ef701d7bSHu Tao     Error *local_err = NULL;
1374e1c57ab8SPaolo Bonzini 
1375e1c57ab8SPaolo Bonzini     if (xen_enabled()) {
13767f56e740SPaolo Bonzini         error_setg(errp, "-mem-path not supported with Xen");
13777f56e740SPaolo Bonzini         return -1;
1378e1c57ab8SPaolo Bonzini     }
1379e1c57ab8SPaolo Bonzini 
1380e1c57ab8SPaolo Bonzini     if (phys_mem_alloc != qemu_anon_ram_alloc) {
1381e1c57ab8SPaolo Bonzini         /*
1382e1c57ab8SPaolo Bonzini          * file_ram_alloc() needs to allocate just like
1383e1c57ab8SPaolo Bonzini          * phys_mem_alloc, but we haven't bothered to provide
1384e1c57ab8SPaolo Bonzini          * a hook there.
1385e1c57ab8SPaolo Bonzini          */
13867f56e740SPaolo Bonzini         error_setg(errp,
13877f56e740SPaolo Bonzini                    "-mem-path not supported with this accelerator");
13887f56e740SPaolo Bonzini         return -1;
1389e1c57ab8SPaolo Bonzini     }
1390e1c57ab8SPaolo Bonzini 
1391e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1392e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1393e1c57ab8SPaolo Bonzini     new_block->mr = mr;
1394e1c57ab8SPaolo Bonzini     new_block->length = size;
1395dbcb8981SPaolo Bonzini     new_block->flags = share ? RAM_SHARED : 0;
13967f56e740SPaolo Bonzini     new_block->host = file_ram_alloc(new_block, size,
13977f56e740SPaolo Bonzini                                      mem_path, errp);
13987f56e740SPaolo Bonzini     if (!new_block->host) {
13997f56e740SPaolo Bonzini         g_free(new_block);
14007f56e740SPaolo Bonzini         return -1;
14017f56e740SPaolo Bonzini     }
14027f56e740SPaolo Bonzini 
1403ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1404ef701d7bSHu Tao     if (local_err) {
1405ef701d7bSHu Tao         g_free(new_block);
1406ef701d7bSHu Tao         error_propagate(errp, local_err);
1407ef701d7bSHu Tao         return -1;
1408ef701d7bSHu Tao     }
1409ef701d7bSHu Tao     return addr;
1410e1c57ab8SPaolo Bonzini }
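
/*
 * Usage sketch (hypothetical caller, Linux only): back a region with
 * hugetlbfs, propagating errors the way this file does elsewhere:
 *
 *     Error *local_err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, false,
 *                                                "/dev/hugepages",
 *                                                &local_err);
 *     if (local_err) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *     }
 */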
14110b183fc8SPaolo Bonzini #endif
1412e1c57ab8SPaolo Bonzini 
1413e1c57ab8SPaolo Bonzini ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1414ef701d7bSHu Tao                                    MemoryRegion *mr, Error **errp)
1415e1c57ab8SPaolo Bonzini {
1416e1c57ab8SPaolo Bonzini     RAMBlock *new_block;
1417ef701d7bSHu Tao     ram_addr_t addr;
1418ef701d7bSHu Tao     Error *local_err = NULL;
1419e1c57ab8SPaolo Bonzini 
1420e1c57ab8SPaolo Bonzini     size = TARGET_PAGE_ALIGN(size);
1421e1c57ab8SPaolo Bonzini     new_block = g_malloc0(sizeof(*new_block));
1422e1c57ab8SPaolo Bonzini     new_block->mr = mr;
1423e1c57ab8SPaolo Bonzini     new_block->length = size;
1424e1c57ab8SPaolo Bonzini     new_block->fd = -1;
1425e1c57ab8SPaolo Bonzini     new_block->host = host;
1426e1c57ab8SPaolo Bonzini     if (host) {
14277bd4f430SPaolo Bonzini         new_block->flags |= RAM_PREALLOC;
1428e1c57ab8SPaolo Bonzini     }
1429ef701d7bSHu Tao     addr = ram_block_add(new_block, &local_err);
1430ef701d7bSHu Tao     if (local_err) {
1431ef701d7bSHu Tao         g_free(new_block);
1432ef701d7bSHu Tao         error_propagate(errp, local_err);
1433ef701d7bSHu Tao         return -1;
1434ef701d7bSHu Tao     }
1435ef701d7bSHu Tao     return addr;
1436e1c57ab8SPaolo Bonzini }
1437e1c57ab8SPaolo Bonzini 
1438ef701d7bSHu Tao ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
14396977dfe6SYoshiaki Tamura {
1440ef701d7bSHu Tao     return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
14416977dfe6SYoshiaki Tamura }
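
/*
 * Minimal allocation sketch (hypothetical caller):
 *
 *     Error *local_err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc(size, mr, &local_err);
 *     if (local_err) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *     }
 *
 * On success the return value is the block's base offset in ram_addr_t
 * space; qemu_get_ram_ptr(addr) then yields the host pointer.
 */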
14426977dfe6SYoshiaki Tamura 
14431f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
14441f2e98b6SAlex Williamson {
14451f2e98b6SAlex Williamson     RAMBlock *block;
14461f2e98b6SAlex Williamson 
1447b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1448b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1449a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
14501f2e98b6SAlex Williamson         if (addr == block->offset) {
1451a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
14520d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1453f798b07fSUmesh Deshpande             ram_list.version++;
14547267c094SAnthony Liguori             g_free(block);
1455b2a8658eSUmesh Deshpande             break;
14561f2e98b6SAlex Williamson         }
14571f2e98b6SAlex Williamson     }
1458b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
14591f2e98b6SAlex Williamson }
14601f2e98b6SAlex Williamson 
1461c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
1462e9a1ab19Sbellard {
146304b16653SAlex Williamson     RAMBlock *block;
146404b16653SAlex Williamson 
1465b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1466b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1467a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
146804b16653SAlex Williamson         if (addr == block->offset) {
1469a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
14700d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1471f798b07fSUmesh Deshpande             ram_list.version++;
14727bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1473cd19cfa2SHuang Ying                 ;
1474dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1475dfeaf2abSMarkus Armbruster                 xen_invalidate_map_cache_entry(block->host);
1476089f3f76SStefan Weil #ifndef _WIN32
14773435f395SMarkus Armbruster             } else if (block->fd >= 0) {
147804b16653SAlex Williamson                 munmap(block->host, block->length);
147904b16653SAlex Williamson                 close(block->fd);
1480089f3f76SStefan Weil #endif
148104b16653SAlex Williamson             } else {
1482e7a09b92SPaolo Bonzini                 qemu_anon_ram_free(block->host, block->length);
148304b16653SAlex Williamson             }
14847267c094SAnthony Liguori             g_free(block);
1485b2a8658eSUmesh Deshpande             break;
148604b16653SAlex Williamson         }
148704b16653SAlex Williamson     }
1488b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
148904b16653SAlex Williamson 
1490e9a1ab19Sbellard }
1491e9a1ab19Sbellard 
1492cd19cfa2SHuang Ying #ifndef _WIN32
1493cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1494cd19cfa2SHuang Ying {
1495cd19cfa2SHuang Ying     RAMBlock *block;
1496cd19cfa2SHuang Ying     ram_addr_t offset;
1497cd19cfa2SHuang Ying     int flags;
1498cd19cfa2SHuang Ying     void *area, *vaddr;
1499cd19cfa2SHuang Ying 
1500a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1501cd19cfa2SHuang Ying         offset = addr - block->offset;
1502cd19cfa2SHuang Ying         if (offset < block->length) {
15031240be24SMichael S. Tsirkin             vaddr = ramblock_ptr(block, offset);
15047bd4f430SPaolo Bonzini             if (block->flags & RAM_PREALLOC) {
1505cd19cfa2SHuang Ying                 ;
1506dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1507dfeaf2abSMarkus Armbruster                 abort();
1508cd19cfa2SHuang Ying             } else {
1509cd19cfa2SHuang Ying                 flags = MAP_FIXED;
1510cd19cfa2SHuang Ying                 munmap(vaddr, length);
15113435f395SMarkus Armbruster                 if (block->fd >= 0) {
1512dbcb8981SPaolo Bonzini                     flags |= (block->flags & RAM_SHARED ?
1513dbcb8981SPaolo Bonzini                               MAP_SHARED : MAP_PRIVATE);
1514cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1515cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1516cd19cfa2SHuang Ying                 } else {
15172eb9fbaaSMarkus Armbruster                     /*
15182eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
15192eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
15202eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
15212eb9fbaaSMarkus Armbruster                      */
15222eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
15232eb9fbaaSMarkus Armbruster 
1524cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1525cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1526cd19cfa2SHuang Ying                                 flags, -1, 0);
1527cd19cfa2SHuang Ying                 }
1528cd19cfa2SHuang Ying                 if (area != vaddr) {
1529f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1530f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1531cd19cfa2SHuang Ying                             length, addr);
1532cd19cfa2SHuang Ying                     exit(1);
1533cd19cfa2SHuang Ying                 }
15348490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1535ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1536cd19cfa2SHuang Ying             }
1537cd19cfa2SHuang Ying             return;
1538cd19cfa2SHuang Ying         }
1539cd19cfa2SHuang Ying     }
1540cd19cfa2SHuang Ying }
1541cd19cfa2SHuang Ying #endif /* !_WIN32 */
1542cd19cfa2SHuang Ying 
1543a35ba7beSPaolo Bonzini int qemu_get_ram_fd(ram_addr_t addr)
1544a35ba7beSPaolo Bonzini {
1545a35ba7beSPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
1546a35ba7beSPaolo Bonzini 
1547a35ba7beSPaolo Bonzini     return block->fd;
1548a35ba7beSPaolo Bonzini }
1549a35ba7beSPaolo Bonzini 
15503fd74b84SDamjan Marion void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
15513fd74b84SDamjan Marion {
15523fd74b84SDamjan Marion     RAMBlock *block = qemu_get_ram_block(addr);
15533fd74b84SDamjan Marion 
15541240be24SMichael S. Tsirkin     return ramblock_ptr(block, 0);
15553fd74b84SDamjan Marion }
15563fd74b84SDamjan Marion 
15571b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
15581b5ec234SPaolo Bonzini    With the exception of the softmmu code in this file, this should
15591b5ec234SPaolo Bonzini    only be used for local memory (e.g. video ram) that the device owns,
15601b5ec234SPaolo Bonzini    and knows it isn't going to access beyond the end of the block.
15611b5ec234SPaolo Bonzini 
15621b5ec234SPaolo Bonzini    It should not be used for general purpose DMA.
15631b5ec234SPaolo Bonzini    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
15641b5ec234SPaolo Bonzini  */
15651b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
15661b5ec234SPaolo Bonzini {
15671b5ec234SPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
15681b5ec234SPaolo Bonzini 
1569868bb33fSJan Kiszka     if (xen_enabled()) {
1570432d268cSJun Nakajima         /* We need to check whether the requested address is in RAM
1571432d268cSJun Nakajima          * because we don't want to map the entire guest memory in QEMU.
1572712c2b41SStefano Stabellini          * In that case, just map up to the end of the page.
1573432d268cSJun Nakajima          */
1574432d268cSJun Nakajima         if (block->offset == 0) {
1575e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1576432d268cSJun Nakajima         } else if (block->host == NULL) {
1577e41d7c69SJan Kiszka             block->host =
1578e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1579432d268cSJun Nakajima         }
1580432d268cSJun Nakajima     }
15811240be24SMichael S. Tsirkin     return ramblock_ptr(block, addr - block->offset);
158294a6b54fSpbrook }
1583f471a17eSAlex Williamson 
158438bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
158538bee5dcSStefano Stabellini  * but takes a size argument */
1586cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
158738bee5dcSStefano Stabellini {
15888ab934f9SStefano Stabellini     if (*size == 0) {
15898ab934f9SStefano Stabellini         return NULL;
15908ab934f9SStefano Stabellini     }
1591868bb33fSJan Kiszka     if (xen_enabled()) {
1592e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1593868bb33fSJan Kiszka     } else {
159438bee5dcSStefano Stabellini         RAMBlock *block;
159538bee5dcSStefano Stabellini 
1596a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
159738bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
159838bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
159938bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
16001240be24SMichael S. Tsirkin                 return ramblock_ptr(block, addr - block->offset);
160138bee5dcSStefano Stabellini             }
160238bee5dcSStefano Stabellini         }
160338bee5dcSStefano Stabellini 
160438bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
160538bee5dcSStefano Stabellini         abort();
160638bee5dcSStefano Stabellini     }
160738bee5dcSStefano Stabellini }
160838bee5dcSStefano Stabellini 
16097443b437SPaolo Bonzini /* Some of the softmmu routines need to translate from a host pointer
16107443b437SPaolo Bonzini    (typically a TLB entry) back to a ram offset.  */
16111b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
16125579c7f3Spbrook {
161394a6b54fSpbrook     RAMBlock *block;
161494a6b54fSpbrook     uint8_t *host = ptr;
161594a6b54fSpbrook 
1616868bb33fSJan Kiszka     if (xen_enabled()) {
1617e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
16181b5ec234SPaolo Bonzini         return qemu_get_ram_block(*ram_addr)->mr;
1619712c2b41SStefano Stabellini     }
1620712c2b41SStefano Stabellini 
162123887b79SPaolo Bonzini     block = ram_list.mru_block;
162223887b79SPaolo Bonzini     if (block && block->host && host - block->host < block->length) {
162323887b79SPaolo Bonzini         goto found;
162423887b79SPaolo Bonzini     }
162523887b79SPaolo Bonzini 
1626a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1627432d268cSJun Nakajima         /* This case occurs when the block is not mapped. */
1628432d268cSJun Nakajima         if (block->host == NULL) {
1629432d268cSJun Nakajima             continue;
1630432d268cSJun Nakajima         }
1631f471a17eSAlex Williamson         if (host - block->host < block->length) {
163223887b79SPaolo Bonzini             goto found;
163394a6b54fSpbrook         }
1634f471a17eSAlex Williamson     }
1635432d268cSJun Nakajima 
16361b5ec234SPaolo Bonzini     return NULL;
163723887b79SPaolo Bonzini 
163823887b79SPaolo Bonzini found:
163923887b79SPaolo Bonzini     *ram_addr = block->offset + (host - block->host);
16401b5ec234SPaolo Bonzini     return block->mr;
1641e890261fSMarcelo Tosatti }
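
/*
 * Worked example (addresses hypothetical): if a block with offset 0x40000
 * is mapped at host address H, then for ptr == H + 0x123 this returns the
 * block's MemoryRegion and sets *ram_addr to 0x40123; a pointer belonging
 * to no block yields NULL.
 */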
1642f471a17eSAlex Williamson 
1643a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
16440e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
16451ccde1cbSbellard {
164652159192SJuan Quintela     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
16470e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
16483a7d929eSbellard     }
16490e0df1e2SAvi Kivity     switch (size) {
16500e0df1e2SAvi Kivity     case 1:
16515579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
16520e0df1e2SAvi Kivity         break;
16530e0df1e2SAvi Kivity     case 2:
16545579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
16550e0df1e2SAvi Kivity         break;
16560e0df1e2SAvi Kivity     case 4:
16575579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
16580e0df1e2SAvi Kivity         break;
16590e0df1e2SAvi Kivity     default:
16600e0df1e2SAvi Kivity         abort();
16610e0df1e2SAvi Kivity     }
16626886867eSPaolo Bonzini     cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1663f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1664f23db169Sbellard        flushed */
1665a2cd8c85SJuan Quintela     if (!cpu_physical_memory_is_clean(ram_addr)) {
16664917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
166793afeadeSAndreas Färber         tlb_set_dirty(env, current_cpu->mem_io_vaddr);
16684917cf44SAndreas Färber     }
16691ccde1cbSbellard }
16701ccde1cbSbellard 
1671b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1672b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1673b018ddf6SPaolo Bonzini {
1674b018ddf6SPaolo Bonzini     return is_write;
1675b018ddf6SPaolo Bonzini }
1676b018ddf6SPaolo Bonzini 
16770e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
16780e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1679b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
16800e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
16811ccde1cbSbellard };
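
/*
 * Flow sketch for the notdirty path above: a store to a clean page (one
 * that may contain translated code) is routed here, overlapping TBs are
 * invalidated, the write is replayed into RAM, the range is marked dirty,
 * and the TLB entry is flipped back so later stores go straight to RAM.
 */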
16821ccde1cbSbellard 
16830f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
168405068c0dSPeter Maydell static void check_watchpoint(int offset, int len, int flags)
16850f459d16Spbrook {
168693afeadeSAndreas Färber     CPUState *cpu = current_cpu;
168793afeadeSAndreas Färber     CPUArchState *env = cpu->env_ptr;
168806d55cc1Saliguori     target_ulong pc, cs_base;
16890f459d16Spbrook     target_ulong vaddr;
1690a1d1bb31Saliguori     CPUWatchpoint *wp;
169106d55cc1Saliguori     int cpu_flags;
16920f459d16Spbrook 
1693ff4700b0SAndreas Färber     if (cpu->watchpoint_hit) {
169406d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
169606d55cc1Saliguori          * the debug interrupt so that it will trigger after the
169606d55cc1Saliguori          * current instruction. */
169793afeadeSAndreas Färber         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
169806d55cc1Saliguori         return;
169906d55cc1Saliguori     }
170093afeadeSAndreas Färber     vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1701ff4700b0SAndreas Färber     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
170205068c0dSPeter Maydell         if (cpu_watchpoint_address_matches(wp, vaddr, len)
170305068c0dSPeter Maydell             && (wp->flags & flags)) {
170408225676SPeter Maydell             if (flags == BP_MEM_READ) {
170508225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_READ;
170608225676SPeter Maydell             } else {
170708225676SPeter Maydell                 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
170808225676SPeter Maydell             }
170908225676SPeter Maydell             wp->hitaddr = vaddr;
1710ff4700b0SAndreas Färber             if (!cpu->watchpoint_hit) {
1711ff4700b0SAndreas Färber                 cpu->watchpoint_hit = wp;
1712239c51a5SAndreas Färber                 tb_check_watchpoint(cpu);
171306d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
171427103424SAndreas Färber                     cpu->exception_index = EXCP_DEBUG;
17155638d180SAndreas Färber                     cpu_loop_exit(cpu);
171606d55cc1Saliguori                 } else {
171706d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1718648f034cSAndreas Färber                     tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
17190ea8cb88SAndreas Färber                     cpu_resume_from_signal(cpu, NULL);
17200f459d16Spbrook                 }
1721488d6577SMax Filippov             }
17226e140f28Saliguori         } else {
17236e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
17246e140f28Saliguori         }
17250f459d16Spbrook     }
17260f459d16Spbrook }
17270f459d16Spbrook 
17286658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
17296658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
17306658ffb8Spbrook    phys routines.  */
1731a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
17321ec9b909SAvi Kivity                                unsigned size)
17336658ffb8Spbrook {
173405068c0dSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
17351ec9b909SAvi Kivity     switch (size) {
17362c17449bSEdgar E. Iglesias     case 1: return ldub_phys(&address_space_memory, addr);
173741701aa4SEdgar E. Iglesias     case 2: return lduw_phys(&address_space_memory, addr);
1738fdfba1a2SEdgar E. Iglesias     case 4: return ldl_phys(&address_space_memory, addr);
17391ec9b909SAvi Kivity     default: abort();
17401ec9b909SAvi Kivity     }
17416658ffb8Spbrook }
17426658ffb8Spbrook 
1743a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
17441ec9b909SAvi Kivity                             uint64_t val, unsigned size)
17456658ffb8Spbrook {
174605068c0dSPeter Maydell     check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
17471ec9b909SAvi Kivity     switch (size) {
174867364150SMax Filippov     case 1:
1749db3be60dSEdgar E. Iglesias         stb_phys(&address_space_memory, addr, val);
175067364150SMax Filippov         break;
175167364150SMax Filippov     case 2:
17525ce5944dSEdgar E. Iglesias         stw_phys(&address_space_memory, addr, val);
175367364150SMax Filippov         break;
175467364150SMax Filippov     case 4:
1755ab1da857SEdgar E. Iglesias         stl_phys(&address_space_memory, addr, val);
175667364150SMax Filippov         break;
17571ec9b909SAvi Kivity     default: abort();
17581ec9b909SAvi Kivity     }
17596658ffb8Spbrook }
17606658ffb8Spbrook 
17611ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
17621ec9b909SAvi Kivity     .read = watch_mem_read,
17631ec9b909SAvi Kivity     .write = watch_mem_write,
17641ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
17656658ffb8Spbrook };
17666658ffb8Spbrook 
1767a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
176870c68e44SAvi Kivity                              unsigned len)
1769db7b5426Sblueswir1 {
1770acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1771acc9d80bSJan Kiszka     uint8_t buf[4];
1772791af8c8SPaolo Bonzini 
1773db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1774016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1775acc9d80bSJan Kiszka            subpage, len, addr);
1776db7b5426Sblueswir1 #endif
1777acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1778acc9d80bSJan Kiszka     switch (len) {
1779acc9d80bSJan Kiszka     case 1:
1780acc9d80bSJan Kiszka         return ldub_p(buf);
1781acc9d80bSJan Kiszka     case 2:
1782acc9d80bSJan Kiszka         return lduw_p(buf);
1783acc9d80bSJan Kiszka     case 4:
1784acc9d80bSJan Kiszka         return ldl_p(buf);
1785acc9d80bSJan Kiszka     default:
1786acc9d80bSJan Kiszka         abort();
1787acc9d80bSJan Kiszka     }
1788db7b5426Sblueswir1 }
1789db7b5426Sblueswir1 
1790a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
179170c68e44SAvi Kivity                           uint64_t value, unsigned len)
1792db7b5426Sblueswir1 {
1793acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1794acc9d80bSJan Kiszka     uint8_t buf[4];
1795acc9d80bSJan Kiszka 
1796db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1797016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1798acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1799acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1800db7b5426Sblueswir1 #endif
1801acc9d80bSJan Kiszka     switch (len) {
1802acc9d80bSJan Kiszka     case 1:
1803acc9d80bSJan Kiszka         stb_p(buf, value);
1804acc9d80bSJan Kiszka         break;
1805acc9d80bSJan Kiszka     case 2:
1806acc9d80bSJan Kiszka         stw_p(buf, value);
1807acc9d80bSJan Kiszka         break;
1808acc9d80bSJan Kiszka     case 4:
1809acc9d80bSJan Kiszka         stl_p(buf, value);
1810acc9d80bSJan Kiszka         break;
1811acc9d80bSJan Kiszka     default:
1812acc9d80bSJan Kiszka         abort();
1813acc9d80bSJan Kiszka     }
1814acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1815db7b5426Sblueswir1 }
1816db7b5426Sblueswir1 
1817c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1818016e9d62SAmos Kong                             unsigned len, bool is_write)
1819c353e4ccSPaolo Bonzini {
1820acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1821c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1822016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1823acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
1824c353e4ccSPaolo Bonzini #endif
1825c353e4ccSPaolo Bonzini 
1826acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1827016e9d62SAmos Kong                                       len, is_write);
1828c353e4ccSPaolo Bonzini }
1829c353e4ccSPaolo Bonzini 
183070c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
183170c68e44SAvi Kivity     .read = subpage_read,
183270c68e44SAvi Kivity     .write = subpage_write,
1833c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
183470c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
1835db7b5426Sblueswir1 };
1836db7b5426Sblueswir1 
1837c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
18385312bd8bSAvi Kivity                              uint16_t section)
1839db7b5426Sblueswir1 {
1840db7b5426Sblueswir1     int idx, eidx;
1841db7b5426Sblueswir1 
1842db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1843db7b5426Sblueswir1         return -1;
1844db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1845db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1846db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1847016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1848016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
1849db7b5426Sblueswir1 #endif
1850db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
18515312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1852db7b5426Sblueswir1     }
1853db7b5426Sblueswir1 
1854db7b5426Sblueswir1     return 0;
1855db7b5426Sblueswir1 }
1856db7b5426Sblueswir1 
1857acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1858db7b5426Sblueswir1 {
1859c227f099SAnthony Liguori     subpage_t *mmio;
1860db7b5426Sblueswir1 
18617267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
18621eec614bSaliguori 
1863acc9d80bSJan Kiszka     mmio->as = as;
1864db7b5426Sblueswir1     mmio->base = base;
18652c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1866b4fefef9SPeter Crosthwaite                           NULL, TARGET_PAGE_SIZE);
1867b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1868db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1869016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1870016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
1871db7b5426Sblueswir1 #endif
1872b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1873db7b5426Sblueswir1 
1874db7b5426Sblueswir1     return mmio;
1875db7b5426Sblueswir1 }
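
/*
 * Subpage dispatch in one line (sketch): a load of len bytes at offset off
 * inside a subpage-backed page becomes, via a bounce buffer,
 *
 *     address_space_read(subpage->as, subpage->base + off, buf, len);
 *
 * (stores symmetrically use address_space_write()), so per-region routing
 * is redone at byte granularity within the page.
 */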
1876db7b5426Sblueswir1 
1877a656e22fSPeter Crosthwaite static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1878a656e22fSPeter Crosthwaite                               MemoryRegion *mr)
18795312bd8bSAvi Kivity {
1880a656e22fSPeter Crosthwaite     assert(as);
18815312bd8bSAvi Kivity     MemoryRegionSection section = {
1882a656e22fSPeter Crosthwaite         .address_space = as,
18835312bd8bSAvi Kivity         .mr = mr,
18845312bd8bSAvi Kivity         .offset_within_address_space = 0,
18855312bd8bSAvi Kivity         .offset_within_region = 0,
1886052e87b0SPaolo Bonzini         .size = int128_2_64(),
18875312bd8bSAvi Kivity     };
18885312bd8bSAvi Kivity 
188953cb28cbSMarcel Apfelbaum     return phys_section_add(map, &section);
18905312bd8bSAvi Kivity }
18915312bd8bSAvi Kivity 
189277717094SEdgar E. Iglesias MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1893aa102231SAvi Kivity {
189477717094SEdgar E. Iglesias     return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1895aa102231SAvi Kivity }
1896aa102231SAvi Kivity 
1897e9179ce1SAvi Kivity static void io_mem_init(void)
1898e9179ce1SAvi Kivity {
18991f6245e5SPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
19002c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
19011f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
19022c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
19031f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
19042c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
19051f6245e5SPaolo Bonzini                           NULL, UINT64_MAX);
1906e9179ce1SAvi Kivity }
1907e9179ce1SAvi Kivity 
1908ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1909ac1970fbSAvi Kivity {
191089ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
191153cb28cbSMarcel Apfelbaum     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
191253cb28cbSMarcel Apfelbaum     uint16_t n;
191353cb28cbSMarcel Apfelbaum 
1914a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_unassigned);
191553cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_UNASSIGNED);
1916a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_notdirty);
191753cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_NOTDIRTY);
1918a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_rom);
191953cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_ROM);
1920a656e22fSPeter Crosthwaite     n = dummy_section(&d->map, as, &io_mem_watch);
192153cb28cbSMarcel Apfelbaum     assert(n == PHYS_SECTION_WATCH);
192200752703SPaolo Bonzini 
19239736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
192400752703SPaolo Bonzini     d->as = as;
192500752703SPaolo Bonzini     as->next_dispatch = d;
192600752703SPaolo Bonzini }
192700752703SPaolo Bonzini 
192800752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
192900752703SPaolo Bonzini {
193000752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
19310475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
19320475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
1933ac1970fbSAvi Kivity 
193453cb28cbSMarcel Apfelbaum     phys_page_compact_all(next, next->map.nodes_nb);
1935b35ba30fSMichael S. Tsirkin 
19360475d94fSPaolo Bonzini     as->dispatch = next;
193753cb28cbSMarcel Apfelbaum 
193853cb28cbSMarcel Apfelbaum     if (cur) {
193953cb28cbSMarcel Apfelbaum         phys_sections_free(&cur->map);
19400475d94fSPaolo Bonzini         g_free(cur);
1941ac1970fbSAvi Kivity     }
19429affd6fcSPaolo Bonzini }
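
/*
 * Dispatch rebuild lifecycle, as driven by the listener set up in
 * address_space_init_dispatch() below (sketch):
 *
 *     mem_begin()  -> allocate as->next_dispatch, seed the four fixed
 *                     sections (unassigned, notdirty, rom, watch)
 *     mem_add()    -> populate it via register_subpage() and
 *                     register_multipage()
 *     mem_commit() -> compact the page table, publish it as as->dispatch,
 *                     free the previous dispatch
 */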
19439affd6fcSPaolo Bonzini 
19441d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
194550c1e149SAvi Kivity {
1946182735efSAndreas Färber     CPUState *cpu;
1947117712c3SAvi Kivity 
1948117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
1949117712c3SAvi Kivity        reset the modified entries */
1950117712c3SAvi Kivity     /* XXX: slow ! */
1951bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
195333bde2e1SEdgar E. Iglesias         /* FIXME: Disentangle the cpu.h circular file deps so we can
195333bde2e1SEdgar E. Iglesias            directly get the right CPU from listener.  */
195433bde2e1SEdgar E. Iglesias         if (cpu->tcg_as_listener != listener) {
195533bde2e1SEdgar E. Iglesias             continue;
195633bde2e1SEdgar E. Iglesias         }
195700c8cb0aSAndreas Färber         tlb_flush(cpu, 1);
1958117712c3SAvi Kivity     }
195950c1e149SAvi Kivity }
196050c1e149SAvi Kivity 
196193632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
196293632747SAvi Kivity {
1963981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(true);
196493632747SAvi Kivity }
196593632747SAvi Kivity 
196693632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
196793632747SAvi Kivity {
1968981fdf23SJuan Quintela     cpu_physical_memory_set_dirty_tracking(false);
196993632747SAvi Kivity }
197093632747SAvi Kivity 
197193632747SAvi Kivity static MemoryListener core_memory_listener = {
197293632747SAvi Kivity     .log_global_start = core_log_global_start,
197393632747SAvi Kivity     .log_global_stop = core_log_global_stop,
1974ac1970fbSAvi Kivity     .priority = 1,
197593632747SAvi Kivity };
197693632747SAvi Kivity 
1977ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
1978ac1970fbSAvi Kivity {
197900752703SPaolo Bonzini     as->dispatch = NULL;
198089ae337aSPaolo Bonzini     as->dispatch_listener = (MemoryListener) {
1981ac1970fbSAvi Kivity         .begin = mem_begin,
198200752703SPaolo Bonzini         .commit = mem_commit,
1983ac1970fbSAvi Kivity         .region_add = mem_add,
1984ac1970fbSAvi Kivity         .region_nop = mem_add,
1985ac1970fbSAvi Kivity         .priority = 0,
1986ac1970fbSAvi Kivity     };
198789ae337aSPaolo Bonzini     memory_listener_register(&as->dispatch_listener, as);
1988ac1970fbSAvi Kivity }
1989ac1970fbSAvi Kivity 
199083f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
199183f3c251SAvi Kivity {
199283f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
199383f3c251SAvi Kivity 
199489ae337aSPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
199583f3c251SAvi Kivity     g_free(d);
199683f3c251SAvi Kivity     as->dispatch = NULL;
199783f3c251SAvi Kivity }
199883f3c251SAvi Kivity 
199962152b8aSAvi Kivity static void memory_map_init(void)
200062152b8aSAvi Kivity {
20017267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
200203f49957SPaolo Bonzini 
200357271d63SPaolo Bonzini     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
20047dca8043SAlexey Kardashevskiy     address_space_init(&address_space_memory, system_memory, "memory");
2005309cb471SAvi Kivity 
20067267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
20073bb28b72SJan Kiszka     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
20083bb28b72SJan Kiszka                           65536);
20097dca8043SAlexey Kardashevskiy     address_space_init(&address_space_io, system_io, "I/O");
201093632747SAvi Kivity 
2011f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
20122641689aSliguang }
201362152b8aSAvi Kivity 
201462152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
201562152b8aSAvi Kivity {
201662152b8aSAvi Kivity     return system_memory;
201762152b8aSAvi Kivity }
201862152b8aSAvi Kivity 
2019309cb471SAvi Kivity MemoryRegion *get_system_io(void)
2020309cb471SAvi Kivity {
2021309cb471SAvi Kivity     return system_io;
2022309cb471SAvi Kivity }
2023309cb471SAvi Kivity 
2024e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2025e2eef170Spbrook 
202613eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
202713eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
2028f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2029a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
203013eb76e0Sbellard {
203113eb76e0Sbellard     int l, flags;
203213eb76e0Sbellard     target_ulong page;
203353a5960aSpbrook     void * p;
203413eb76e0Sbellard 
203513eb76e0Sbellard     while (len > 0) {
203613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
203713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
203813eb76e0Sbellard         if (l > len)
203913eb76e0Sbellard             l = len;
204013eb76e0Sbellard         flags = page_get_flags(page);
204113eb76e0Sbellard         if (!(flags & PAGE_VALID))
2042a68fe89cSPaul Brook             return -1;
204313eb76e0Sbellard         if (is_write) {
204413eb76e0Sbellard             if (!(flags & PAGE_WRITE))
2045a68fe89cSPaul Brook                 return -1;
2046579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
204772fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2048a68fe89cSPaul Brook                 return -1;
204972fb7daaSaurel32             memcpy(p, buf, l);
205072fb7daaSaurel32             unlock_user(p, addr, l);
205113eb76e0Sbellard         } else {
205213eb76e0Sbellard             if (!(flags & PAGE_READ))
2053a68fe89cSPaul Brook                 return -1;
2054579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
205572fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2056a68fe89cSPaul Brook                 return -1;
205772fb7daaSaurel32             memcpy(buf, p, l);
20585b257578Saurel32             unlock_user(p, addr, 0);
205913eb76e0Sbellard         }
206013eb76e0Sbellard         len -= l;
206113eb76e0Sbellard         buf += l;
206213eb76e0Sbellard         addr += l;
206313eb76e0Sbellard     }
2064a68fe89cSPaul Brook     return 0;
206513eb76e0Sbellard }
20668df1cd07Sbellard 
206713eb76e0Sbellard #else
206851d7a9ebSAnthony PERARD 
2069a8170e5eSAvi Kivity static void invalidate_and_set_dirty(hwaddr addr,
2070a8170e5eSAvi Kivity                                      hwaddr length)
207151d7a9ebSAnthony PERARD {
2072f874bf90SPeter Maydell     if (cpu_physical_memory_range_includes_clean(addr, length)) {
2073f874bf90SPeter Maydell         tb_invalidate_phys_range(addr, addr + length, 0);
20746886867eSPaolo Bonzini         cpu_physical_memory_set_dirty_range_nocode(addr, length);
207551d7a9ebSAnthony PERARD     }
2076e226939dSAnthony PERARD     xen_modified_memory(addr, length);
207751d7a9ebSAnthony PERARD }
207851d7a9ebSAnthony PERARD 
207923326164SRichard Henderson static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
208082f2563fSPaolo Bonzini {
2081e1622f4bSPaolo Bonzini     unsigned access_size_max = mr->ops->valid.max_access_size;
208223326164SRichard Henderson 
208323326164SRichard Henderson     /* Regions are assumed to support 1-4 byte accesses unless
208423326164SRichard Henderson        otherwise specified.  */
208523326164SRichard Henderson     if (access_size_max == 0) {
208623326164SRichard Henderson         access_size_max = 4;
208782f2563fSPaolo Bonzini     }
208823326164SRichard Henderson 
208923326164SRichard Henderson     /* Bound the maximum access by the alignment of the address.  */
209023326164SRichard Henderson     if (!mr->ops->impl.unaligned) {
209123326164SRichard Henderson         unsigned align_size_max = addr & -addr;
209223326164SRichard Henderson         if (align_size_max != 0 && align_size_max < access_size_max) {
209323326164SRichard Henderson             access_size_max = align_size_max;
209423326164SRichard Henderson         }
209523326164SRichard Henderson     }
209623326164SRichard Henderson 
209723326164SRichard Henderson     /* Don't attempt accesses larger than the maximum.  */
209823326164SRichard Henderson     if (l > access_size_max) {
209923326164SRichard Henderson         l = access_size_max;
210023326164SRichard Henderson     }
2101098178f2SPaolo Bonzini     if (l & (l - 1)) {
2102098178f2SPaolo Bonzini         l = 1 << (qemu_fls(l) - 1);
2103098178f2SPaolo Bonzini     }
210423326164SRichard Henderson 
210523326164SRichard Henderson     return l;
210682f2563fSPaolo Bonzini }
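
/*
 * Worked examples (values hypothetical): with max_access_size unset (so 4)
 * and addr == 0x1002, the alignment bound (addr & -addr == 2) caps an
 * 8-byte request at 2.  With max_access_size == 8, addr == 0x1000 and
 * l == 6, no bound applies but the power-of-two rounding trims l to 4.
 */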
210782f2563fSPaolo Bonzini 
2108fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2109ac1970fbSAvi Kivity                       int len, bool is_write)
211013eb76e0Sbellard {
2111149f54b5SPaolo Bonzini     hwaddr l;
211213eb76e0Sbellard     uint8_t *ptr;
2113791af8c8SPaolo Bonzini     uint64_t val;
2114149f54b5SPaolo Bonzini     hwaddr addr1;
21155c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2116fd8aaa76SPaolo Bonzini     bool error = false;
211713eb76e0Sbellard 
211813eb76e0Sbellard     while (len > 0) {
211913eb76e0Sbellard         l = len;
21205c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
212113eb76e0Sbellard 
212213eb76e0Sbellard         if (is_write) {
21235c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
21245c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
21254917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
21266a00d601Sbellard                    potential bugs */
212723326164SRichard Henderson                 switch (l) {
212823326164SRichard Henderson                 case 8:
212923326164SRichard Henderson                     /* 64 bit write access */
213023326164SRichard Henderson                     val = ldq_p(buf);
213123326164SRichard Henderson                     error |= io_mem_write(mr, addr1, val, 8);
213223326164SRichard Henderson                     break;
213323326164SRichard Henderson                 case 4:
21341c213d19Sbellard                     /* 32 bit write access */
2135c27004ecSbellard                     val = ldl_p(buf);
21365c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
213723326164SRichard Henderson                     break;
213823326164SRichard Henderson                 case 2:
21391c213d19Sbellard                     /* 16 bit write access */
2140c27004ecSbellard                     val = lduw_p(buf);
21415c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
214223326164SRichard Henderson                     break;
214323326164SRichard Henderson                 case 1:
21441c213d19Sbellard                     /* 8 bit write access */
2145c27004ecSbellard                     val = ldub_p(buf);
21465c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
214723326164SRichard Henderson                     break;
214823326164SRichard Henderson                 default:
214923326164SRichard Henderson                     abort();
215013eb76e0Sbellard                 }
21512bbfa05dSPaolo Bonzini             } else {
21525c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
215313eb76e0Sbellard                 /* RAM case */
21545579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
215513eb76e0Sbellard                 memcpy(ptr, buf, l);
215651d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
21573a7d929eSbellard             }
215813eb76e0Sbellard         } else {
21595c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
216013eb76e0Sbellard                 /* I/O case */
21615c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
216223326164SRichard Henderson                 switch (l) {
216323326164SRichard Henderson                 case 8:
216423326164SRichard Henderson                     /* 64 bit read access */
216523326164SRichard Henderson                     error |= io_mem_read(mr, addr1, &val, 8);
216623326164SRichard Henderson                     stq_p(buf, val);
216723326164SRichard Henderson                     break;
216823326164SRichard Henderson                 case 4:
216913eb76e0Sbellard                     /* 32 bit read access */
21705c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
2171c27004ecSbellard                     stl_p(buf, val);
217223326164SRichard Henderson                     break;
217323326164SRichard Henderson                 case 2:
217413eb76e0Sbellard                     /* 16 bit read access */
21755c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
2176c27004ecSbellard                     stw_p(buf, val);
217723326164SRichard Henderson                     break;
217823326164SRichard Henderson                 case 1:
21791c213d19Sbellard                     /* 8 bit read access */
21805c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
2181c27004ecSbellard                     stb_p(buf, val);
218223326164SRichard Henderson                     break;
218323326164SRichard Henderson                 default:
218423326164SRichard Henderson                     abort();
218513eb76e0Sbellard                 }
218613eb76e0Sbellard             } else {
218713eb76e0Sbellard                 /* RAM case */
21885c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2189f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
219013eb76e0Sbellard             }
219113eb76e0Sbellard         }
219213eb76e0Sbellard         len -= l;
219313eb76e0Sbellard         buf += l;
219413eb76e0Sbellard         addr += l;
219513eb76e0Sbellard     }
2196fd8aaa76SPaolo Bonzini 
2197fd8aaa76SPaolo Bonzini     return error;
219813eb76e0Sbellard }
21998df1cd07Sbellard 
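/* Editor's note: a hedged usage sketch of address_space_rw() as a device
 * model might call it for DMA into guest RAM. dma_write_guest() is a
 * hypothetical helper; the cast mirrors address_space_write() below.
 * Like address_space_rw(), it returns true on error. */
static bool dma_write_guest(hwaddr gpa, const void *data, int size)
{
    return address_space_rw(&address_space_memory, gpa,
                            (uint8_t *)data, size, true);
}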
2200fd8aaa76SPaolo Bonzini bool address_space_write(AddressSpace *as, hwaddr addr,
2201ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
2202ac1970fbSAvi Kivity {
2203fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2204ac1970fbSAvi Kivity }
2205ac1970fbSAvi Kivity 
2206fd8aaa76SPaolo Bonzini bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2207ac1970fbSAvi Kivity {
2208fd8aaa76SPaolo Bonzini     return address_space_rw(as, addr, buf, len, false);
2209ac1970fbSAvi Kivity }
2210ac1970fbSAvi Kivity 
2211ac1970fbSAvi Kivity 
2212a8170e5eSAvi Kivity void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2213ac1970fbSAvi Kivity                             int len, int is_write)
2214ac1970fbSAvi Kivity {
2215fd8aaa76SPaolo Bonzini     address_space_rw(&address_space_memory, addr, buf, len, is_write);
2216ac1970fbSAvi Kivity }
2217ac1970fbSAvi Kivity 
2218582b55a9SAlexander Graf enum write_rom_type {
2219582b55a9SAlexander Graf     WRITE_DATA,
2220582b55a9SAlexander Graf     FLUSH_CACHE,
2221582b55a9SAlexander Graf };
2222582b55a9SAlexander Graf 
22232a221651SEdgar E. Iglesias static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2224582b55a9SAlexander Graf     hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2225d0ecd2aaSbellard {
2226149f54b5SPaolo Bonzini     hwaddr l;
2227d0ecd2aaSbellard     uint8_t *ptr;
2228149f54b5SPaolo Bonzini     hwaddr addr1;
22295c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2230d0ecd2aaSbellard 
2231d0ecd2aaSbellard     while (len > 0) {
2232d0ecd2aaSbellard         l = len;
22332a221651SEdgar E. Iglesias         mr = address_space_translate(as, addr, &addr1, &l, true);
2234d0ecd2aaSbellard 
22355c8a00ceSPaolo Bonzini         if (!(memory_region_is_ram(mr) ||
22365c8a00ceSPaolo Bonzini               memory_region_is_romd(mr))) {
2237d0ecd2aaSbellard             /* do nothing */
2238d0ecd2aaSbellard         } else {
22395c8a00ceSPaolo Bonzini             addr1 += memory_region_get_ram_addr(mr);
2240d0ecd2aaSbellard             /* ROM/RAM case */
22415579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
2242582b55a9SAlexander Graf             switch (type) {
2243582b55a9SAlexander Graf             case WRITE_DATA:
2244d0ecd2aaSbellard                 memcpy(ptr, buf, l);
224551d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
2246582b55a9SAlexander Graf                 break;
2247582b55a9SAlexander Graf             case FLUSH_CACHE:
2248582b55a9SAlexander Graf                 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2249582b55a9SAlexander Graf                 break;
2250582b55a9SAlexander Graf             }
2251d0ecd2aaSbellard         }
2252d0ecd2aaSbellard         len -= l;
2253d0ecd2aaSbellard         buf += l;
2254d0ecd2aaSbellard         addr += l;
2255d0ecd2aaSbellard     }
2256d0ecd2aaSbellard }
2257d0ecd2aaSbellard 
2258582b55a9SAlexander Graf /* used for ROM loading: can write to RAM and ROM */
22592a221651SEdgar E. Iglesias void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2260582b55a9SAlexander Graf                                    const uint8_t *buf, int len)
2261582b55a9SAlexander Graf {
22622a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2263582b55a9SAlexander Graf }
2264582b55a9SAlexander Graf 
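/* Editor's note: hedged sketch of the typical caller: a board model
 * copying a firmware image into a ROM region during machine init.
 * FIRMWARE_BASE and load_firmware() are hypothetical names. */
#define FIRMWARE_BASE 0xfffc0000
static void load_firmware(const uint8_t *blob, int size)
{
    /* bypasses the read-only protection that address_space_rw honours */
    cpu_physical_memory_write_rom(&address_space_memory,
                                  FIRMWARE_BASE, blob, size);
}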
2265582b55a9SAlexander Graf void cpu_flush_icache_range(hwaddr start, int len)
2266582b55a9SAlexander Graf {
2267582b55a9SAlexander Graf     /*
2268582b55a9SAlexander Graf      * This function should do the same thing as an icache flush that was
2269582b55a9SAlexander Graf      * triggered from within the guest. For TCG we are always cache coherent,
2270582b55a9SAlexander Graf      * so there is no need to flush anything. For KVM / Xen we need to flush
2271582b55a9SAlexander Graf      * the host's instruction cache at least.
2272582b55a9SAlexander Graf      */
2273582b55a9SAlexander Graf     if (tcg_enabled()) {
2274582b55a9SAlexander Graf         return;
2275582b55a9SAlexander Graf     }
2276582b55a9SAlexander Graf 
22772a221651SEdgar E. Iglesias     cpu_physical_memory_write_rom_internal(&address_space_memory,
22782a221651SEdgar E. Iglesias                                            start, NULL, len, FLUSH_CACHE);
2279582b55a9SAlexander Graf }
2280582b55a9SAlexander Graf 
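/* Editor's note: hedged sketch pairing the two entry points above: when
 * the host rewrites guest code in place, KVM/Xen guests also need the
 * host instruction cache flushed. patch_guest_code() is hypothetical. */
static void patch_guest_code(hwaddr addr, const uint8_t *code, int len)
{
    cpu_physical_memory_write_rom(&address_space_memory, addr, code, len);
    cpu_flush_icache_range(addr, len);
}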
22816d16c2f8Saliguori typedef struct {
2282d3e71559SPaolo Bonzini     MemoryRegion *mr;
22836d16c2f8Saliguori     void *buffer;
2284a8170e5eSAvi Kivity     hwaddr addr;
2285a8170e5eSAvi Kivity     hwaddr len;
22866d16c2f8Saliguori } BounceBuffer;
22876d16c2f8Saliguori 
22886d16c2f8Saliguori static BounceBuffer bounce;
22896d16c2f8Saliguori 
2290ba223c29Saliguori typedef struct MapClient {
2291ba223c29Saliguori     void *opaque;
2292ba223c29Saliguori     void (*callback)(void *opaque);
229372cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
2294ba223c29Saliguori } MapClient;
2295ba223c29Saliguori 
229672cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
229772cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
2298ba223c29Saliguori 
2299ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2300ba223c29Saliguori {
23017267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
2302ba223c29Saliguori 
2303ba223c29Saliguori     client->opaque = opaque;
2304ba223c29Saliguori     client->callback = callback;
230572cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
2306ba223c29Saliguori     return client;
2307ba223c29Saliguori }
2308ba223c29Saliguori 
23098b9c99d9SBlue Swirl static void cpu_unregister_map_client(void *_client)
2310ba223c29Saliguori {
2311ba223c29Saliguori     MapClient *client = (MapClient *)_client;
2312ba223c29Saliguori 
231372cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
23147267c094SAnthony Liguori     g_free(client);
2315ba223c29Saliguori }
2316ba223c29Saliguori 
2317ba223c29Saliguori static void cpu_notify_map_clients(void)
2318ba223c29Saliguori {
2319ba223c29Saliguori     MapClient *client;
2320ba223c29Saliguori 
232172cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
232272cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
2323ba223c29Saliguori         client->callback(client->opaque);
232434d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
2325ba223c29Saliguori     }
2326ba223c29Saliguori }
2327ba223c29Saliguori 
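/* Editor's note: hedged sketch of the retry protocol these helpers
 * implement. A caller whose address_space_map() returned NULL (the
 * single bounce buffer was busy) registers a callback that fires once
 * the buffer is released. retry_map() and DMAState are hypothetical. */
typedef struct DMAState DMAState;

static void retry_map(void *opaque)
{
    DMAState *s = opaque;
    /* re-issue address_space_map() for s and continue the transfer */
    (void)s;
}

/* ... on a NULL return from address_space_map():
 *     cpu_register_map_client(s, retry_map);                        */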
232851644ab7SPaolo Bonzini bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
232951644ab7SPaolo Bonzini {
23305c8a00ceSPaolo Bonzini     MemoryRegion *mr;
233151644ab7SPaolo Bonzini     hwaddr l, xlat;
233251644ab7SPaolo Bonzini 
233351644ab7SPaolo Bonzini     while (len > 0) {
233451644ab7SPaolo Bonzini         l = len;
23355c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &xlat, &l, is_write);
23365c8a00ceSPaolo Bonzini         if (!memory_access_is_direct(mr, is_write)) {
23375c8a00ceSPaolo Bonzini             l = memory_access_size(mr, l, addr);
23385c8a00ceSPaolo Bonzini             if (!memory_region_access_valid(mr, xlat, l, is_write)) {
233951644ab7SPaolo Bonzini                 return false;
234051644ab7SPaolo Bonzini             }
234151644ab7SPaolo Bonzini         }
234251644ab7SPaolo Bonzini 
234351644ab7SPaolo Bonzini         len -= l;
234451644ab7SPaolo Bonzini         addr += l;
234551644ab7SPaolo Bonzini     }
234651644ab7SPaolo Bonzini     return true;
234751644ab7SPaolo Bonzini }
234851644ab7SPaolo Bonzini 
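/* Editor's note: hedged usage sketch: validate a guest-supplied range
 * before touching it, so a malformed descriptor is rejected instead of
 * triggering an unassigned access. guest_range_ok() is a hypothetical
 * helper. */
static bool guest_range_ok(hwaddr addr, int len, bool is_write)
{
    return address_space_access_valid(&address_space_memory,
                                      addr, len, is_write);
}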
23496d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
23506d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
23516d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
23526d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2353ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2354ba223c29Saliguori  * likely to succeed.
23556d16c2f8Saliguori  */
2356ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
2357a8170e5eSAvi Kivity                         hwaddr addr,
2358a8170e5eSAvi Kivity                         hwaddr *plen,
2359ac1970fbSAvi Kivity                         bool is_write)
23606d16c2f8Saliguori {
2361a8170e5eSAvi Kivity     hwaddr len = *plen;
2362e3127ae0SPaolo Bonzini     hwaddr done = 0;
2363e3127ae0SPaolo Bonzini     hwaddr l, xlat, base;
2364e3127ae0SPaolo Bonzini     MemoryRegion *mr, *this_mr;
2365e3127ae0SPaolo Bonzini     ram_addr_t raddr;
23666d16c2f8Saliguori 
2367e3127ae0SPaolo Bonzini     if (len == 0) {
2368e3127ae0SPaolo Bonzini         return NULL;
2369e3127ae0SPaolo Bonzini     }
2370e3127ae0SPaolo Bonzini 
23716d16c2f8Saliguori     l = len;
23725c8a00ceSPaolo Bonzini     mr = address_space_translate(as, addr, &xlat, &l, is_write);
23735c8a00ceSPaolo Bonzini     if (!memory_access_is_direct(mr, is_write)) {
2374e3127ae0SPaolo Bonzini         if (bounce.buffer) {
2375e3127ae0SPaolo Bonzini             return NULL;
23766d16c2f8Saliguori         }
2377e85d9db5SKevin Wolf         /* Avoid unbounded allocations */
2378e85d9db5SKevin Wolf         l = MIN(l, TARGET_PAGE_SIZE);
2379e85d9db5SKevin Wolf         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
23806d16c2f8Saliguori         bounce.addr = addr;
23816d16c2f8Saliguori         bounce.len = l;
2382d3e71559SPaolo Bonzini 
2383d3e71559SPaolo Bonzini         memory_region_ref(mr);
2384d3e71559SPaolo Bonzini         bounce.mr = mr;
23856d16c2f8Saliguori         if (!is_write) {
2386ac1970fbSAvi Kivity             address_space_read(as, addr, bounce.buffer, l);
23876d16c2f8Saliguori         }
238838bee5dcSStefano Stabellini 
238938bee5dcSStefano Stabellini         *plen = l;
239038bee5dcSStefano Stabellini         return bounce.buffer;
23916d16c2f8Saliguori     }
2392e3127ae0SPaolo Bonzini 
2393e3127ae0SPaolo Bonzini     base = xlat;
2394e3127ae0SPaolo Bonzini     raddr = memory_region_get_ram_addr(mr);
2395e3127ae0SPaolo Bonzini 
2396e3127ae0SPaolo Bonzini     for (;;) {
2397e3127ae0SPaolo Bonzini         len -= l;
2398e3127ae0SPaolo Bonzini         addr += l;
2399e3127ae0SPaolo Bonzini         done += l;
2400e3127ae0SPaolo Bonzini         if (len == 0) {
2401e3127ae0SPaolo Bonzini             break;
2402e3127ae0SPaolo Bonzini         }
2403e3127ae0SPaolo Bonzini 
2404e3127ae0SPaolo Bonzini         l = len;
2405e3127ae0SPaolo Bonzini         this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2406e3127ae0SPaolo Bonzini         if (this_mr != mr || xlat != base + done) {
2407149f54b5SPaolo Bonzini             break;
2408149f54b5SPaolo Bonzini         }
24098ab934f9SStefano Stabellini     }
24106d16c2f8Saliguori 
2411d3e71559SPaolo Bonzini     memory_region_ref(mr);
2412e3127ae0SPaolo Bonzini     *plen = done;
2413e3127ae0SPaolo Bonzini     return qemu_ram_ptr_length(raddr + base, plen);
24146d16c2f8Saliguori }
24156d16c2f8Saliguori 
2416ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
24176d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
24186d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
24196d16c2f8Saliguori  */
2420a8170e5eSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2421a8170e5eSAvi Kivity                          int is_write, hwaddr access_len)
24226d16c2f8Saliguori {
24236d16c2f8Saliguori     if (buffer != bounce.buffer) {
2424d3e71559SPaolo Bonzini         MemoryRegion *mr;
24257443b437SPaolo Bonzini         ram_addr_t addr1;
2426d3e71559SPaolo Bonzini 
2427d3e71559SPaolo Bonzini         mr = qemu_ram_addr_from_host(buffer, &addr1);
24281b5ec234SPaolo Bonzini         assert(mr != NULL);
2429d3e71559SPaolo Bonzini         if (is_write) {
24306886867eSPaolo Bonzini             invalidate_and_set_dirty(addr1, access_len);
24316d16c2f8Saliguori         }
2432868bb33fSJan Kiszka         if (xen_enabled()) {
2433e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
2434050a0ddfSAnthony PERARD         }
2435d3e71559SPaolo Bonzini         memory_region_unref(mr);
24366d16c2f8Saliguori         return;
24376d16c2f8Saliguori     }
24386d16c2f8Saliguori     if (is_write) {
2439ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
24406d16c2f8Saliguori     }
2441f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
24426d16c2f8Saliguori     bounce.buffer = NULL;
2443d3e71559SPaolo Bonzini     memory_region_unref(bounce.mr);
2444ba223c29Saliguori     cpu_notify_map_clients();
24456d16c2f8Saliguori }
2446d0ecd2aaSbellard 
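/* Editor's note: a hedged sketch of the canonical map/use/unmap loop
 * the two functions above are built for: zero-copy access that falls
 * back transparently to the bounce buffer. dma_read_region() and the
 * process() callback are hypothetical. */
static void dma_read_region(AddressSpace *as, hwaddr addr, hwaddr len,
                            void (*process)(void *buf, hwaddr len))
{
    while (len > 0) {
        hwaddr plen = len;          /* in: wanted; out: actually mapped */
        void *buf = address_space_map(as, addr, &plen, false);
        if (!buf) {
            return;                 /* exhausted; see cpu_register_map_client */
        }
        process(buf, plen);
        address_space_unmap(as, buf, plen, false, plen);
        addr += plen;
        len -= plen;
    }
}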
2447a8170e5eSAvi Kivity void *cpu_physical_memory_map(hwaddr addr,
2448a8170e5eSAvi Kivity                               hwaddr *plen,
2449ac1970fbSAvi Kivity                               int is_write)
2450ac1970fbSAvi Kivity {
2451ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
2452ac1970fbSAvi Kivity }
2453ac1970fbSAvi Kivity 
2454a8170e5eSAvi Kivity void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2455a8170e5eSAvi Kivity                                int is_write, hwaddr access_len)
2456ac1970fbSAvi Kivity {
2457ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2458ac1970fbSAvi Kivity }
2459ac1970fbSAvi Kivity 
24608df1cd07Sbellard /* warning: addr must be aligned */
2461fdfba1a2SEdgar E. Iglesias static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
24621e78bcc1SAlexander Graf                                          enum device_endian endian)
24638df1cd07Sbellard {
24648df1cd07Sbellard     uint8_t *ptr;
2465791af8c8SPaolo Bonzini     uint64_t val;
24665c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2467149f54b5SPaolo Bonzini     hwaddr l = 4;
2468149f54b5SPaolo Bonzini     hwaddr addr1;
24698df1cd07Sbellard 
2470fdfba1a2SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, false);
24715c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, false)) {
24728df1cd07Sbellard         /* I/O case */
24735c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 4);
24741e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
24751e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
24761e78bcc1SAlexander Graf             val = bswap32(val);
24771e78bcc1SAlexander Graf         }
24781e78bcc1SAlexander Graf #else
24791e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
24801e78bcc1SAlexander Graf             val = bswap32(val);
24811e78bcc1SAlexander Graf         }
24821e78bcc1SAlexander Graf #endif
24838df1cd07Sbellard     } else {
24848df1cd07Sbellard         /* RAM case */
24855c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
248606ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2487149f54b5SPaolo Bonzini                                + addr1);
24881e78bcc1SAlexander Graf         switch (endian) {
24891e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24901e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
24911e78bcc1SAlexander Graf             break;
24921e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24931e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
24941e78bcc1SAlexander Graf             break;
24951e78bcc1SAlexander Graf         default:
24968df1cd07Sbellard             val = ldl_p(ptr);
24971e78bcc1SAlexander Graf             break;
24981e78bcc1SAlexander Graf         }
24998df1cd07Sbellard     }
25008df1cd07Sbellard     return val;
25018df1cd07Sbellard }
25028df1cd07Sbellard 
2503fdfba1a2SEdgar E. Iglesias uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
25041e78bcc1SAlexander Graf {
2505fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
25061e78bcc1SAlexander Graf }
25071e78bcc1SAlexander Graf 
2508fdfba1a2SEdgar E. Iglesias uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
25091e78bcc1SAlexander Graf {
2510fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
25111e78bcc1SAlexander Graf }
25121e78bcc1SAlexander Graf 
2513fdfba1a2SEdgar E. Iglesias uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
25141e78bcc1SAlexander Graf {
2515fdfba1a2SEdgar E. Iglesias     return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
25161e78bcc1SAlexander Graf }
25171e78bcc1SAlexander Graf 
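/* Editor's note: hedged usage sketch: a device with a little-endian
 * register layout reads a descriptor word with the _le_ variant, which
 * byte-swaps only when the target is big-endian. read_desc_word() is
 * hypothetical. */
static uint32_t read_desc_word(AddressSpace *as, hwaddr desc_addr)
{
    return ldl_le_phys(as, desc_addr);
}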
251884b7b8e7Sbellard /* warning: addr must be aligned */
25192c17449bSEdgar E. Iglesias static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
25201e78bcc1SAlexander Graf                                          enum device_endian endian)
252184b7b8e7Sbellard {
252284b7b8e7Sbellard     uint8_t *ptr;
252384b7b8e7Sbellard     uint64_t val;
25245c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2525149f54b5SPaolo Bonzini     hwaddr l = 8;
2526149f54b5SPaolo Bonzini     hwaddr addr1;
252784b7b8e7Sbellard 
25282c17449bSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2529149f54b5SPaolo Bonzini                                  false);
25305c8a00ceSPaolo Bonzini     if (l < 8 || !memory_access_is_direct(mr, false)) {
253184b7b8e7Sbellard         /* I/O case */
25325c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 8);
2533968a5627SPaolo Bonzini #if defined(TARGET_WORDS_BIGENDIAN)
2534968a5627SPaolo Bonzini         if (endian == DEVICE_LITTLE_ENDIAN) {
2535968a5627SPaolo Bonzini             val = bswap64(val);
2536968a5627SPaolo Bonzini         }
2537968a5627SPaolo Bonzini #else
2538968a5627SPaolo Bonzini         if (endian == DEVICE_BIG_ENDIAN) {
2539968a5627SPaolo Bonzini             val = bswap64(val);
2540968a5627SPaolo Bonzini         }
2541968a5627SPaolo Bonzini #endif
254284b7b8e7Sbellard     } else {
254384b7b8e7Sbellard         /* RAM case */
25445c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
254506ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2546149f54b5SPaolo Bonzini                                + addr1);
25471e78bcc1SAlexander Graf         switch (endian) {
25481e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25491e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
25501e78bcc1SAlexander Graf             break;
25511e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25521e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
25531e78bcc1SAlexander Graf             break;
25541e78bcc1SAlexander Graf         default:
255584b7b8e7Sbellard             val = ldq_p(ptr);
25561e78bcc1SAlexander Graf             break;
25571e78bcc1SAlexander Graf         }
255884b7b8e7Sbellard     }
255984b7b8e7Sbellard     return val;
256084b7b8e7Sbellard }
256184b7b8e7Sbellard 
25622c17449bSEdgar E. Iglesias uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
25631e78bcc1SAlexander Graf {
25642c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
25651e78bcc1SAlexander Graf }
25661e78bcc1SAlexander Graf 
25672c17449bSEdgar E. Iglesias uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
25681e78bcc1SAlexander Graf {
25692c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
25701e78bcc1SAlexander Graf }
25711e78bcc1SAlexander Graf 
25722c17449bSEdgar E. Iglesias uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
25731e78bcc1SAlexander Graf {
25742c17449bSEdgar E. Iglesias     return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
25751e78bcc1SAlexander Graf }
25761e78bcc1SAlexander Graf 
2577aab33094Sbellard /* XXX: optimize */
25782c17449bSEdgar E. Iglesias uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2579aab33094Sbellard {
2580aab33094Sbellard     uint8_t val;
25812c17449bSEdgar E. Iglesias     address_space_rw(as, addr, &val, 1, 0);
2582aab33094Sbellard     return val;
2583aab33094Sbellard }
2584aab33094Sbellard 
2585733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
258641701aa4SEdgar E. Iglesias static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
25871e78bcc1SAlexander Graf                                           enum device_endian endian)
2588aab33094Sbellard {
2589733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2590733f0b02SMichael S. Tsirkin     uint64_t val;
25915c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2592149f54b5SPaolo Bonzini     hwaddr l = 2;
2593149f54b5SPaolo Bonzini     hwaddr addr1;
2594733f0b02SMichael S. Tsirkin 
259541701aa4SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2596149f54b5SPaolo Bonzini                                  false);
25975c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2598733f0b02SMichael S. Tsirkin         /* I/O case */
25995c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
26001e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26011e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26021e78bcc1SAlexander Graf             val = bswap16(val);
26031e78bcc1SAlexander Graf         }
26041e78bcc1SAlexander Graf #else
26051e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26061e78bcc1SAlexander Graf             val = bswap16(val);
26071e78bcc1SAlexander Graf         }
26081e78bcc1SAlexander Graf #endif
2609733f0b02SMichael S. Tsirkin     } else {
2610733f0b02SMichael S. Tsirkin         /* RAM case */
26115c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
261206ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2613149f54b5SPaolo Bonzini                                + addr1);
26141e78bcc1SAlexander Graf         switch (endian) {
26151e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26161e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
26171e78bcc1SAlexander Graf             break;
26181e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26191e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
26201e78bcc1SAlexander Graf             break;
26211e78bcc1SAlexander Graf         default:
2622733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
26231e78bcc1SAlexander Graf             break;
26241e78bcc1SAlexander Graf         }
2625733f0b02SMichael S. Tsirkin     }
2626733f0b02SMichael S. Tsirkin     return val;
2627aab33094Sbellard }
2628aab33094Sbellard 
262941701aa4SEdgar E. Iglesias uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
26301e78bcc1SAlexander Graf {
263141701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
26321e78bcc1SAlexander Graf }
26331e78bcc1SAlexander Graf 
263441701aa4SEdgar E. Iglesias uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
26351e78bcc1SAlexander Graf {
263641701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
26371e78bcc1SAlexander Graf }
26381e78bcc1SAlexander Graf 
263941701aa4SEdgar E. Iglesias uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
26401e78bcc1SAlexander Graf {
264141701aa4SEdgar E. Iglesias     return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
26421e78bcc1SAlexander Graf }
26431e78bcc1SAlexander Graf 
26448df1cd07Sbellard /* warning: addr must be aligned. The ram page is not marked as dirty
26458df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
26468df1cd07Sbellard    bits are used to track modified PTEs */
26472198a121SEdgar E. Iglesias void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
26488df1cd07Sbellard {
26498df1cd07Sbellard     uint8_t *ptr;
26505c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2651149f54b5SPaolo Bonzini     hwaddr l = 4;
2652149f54b5SPaolo Bonzini     hwaddr addr1;
26538df1cd07Sbellard 
26542198a121SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2655149f54b5SPaolo Bonzini                                  true);
26565c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
26575c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
26588df1cd07Sbellard     } else {
26595c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
26605579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
26618df1cd07Sbellard         stl_p(ptr, val);
266274576198Saliguori 
266374576198Saliguori         if (unlikely(in_migration)) {
2664a2cd8c85SJuan Quintela             if (cpu_physical_memory_is_clean(addr1)) {
266574576198Saliguori                 /* invalidate code */
266674576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
266774576198Saliguori                 /* set dirty bit */
26686886867eSPaolo Bonzini                 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
266974576198Saliguori             }
267074576198Saliguori         }
26718df1cd07Sbellard     }
26728df1cd07Sbellard }
26738df1cd07Sbellard 
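/* Editor's note: hedged sketch of the use case named in the comment
 * above: a softmmu page-table walker setting the accessed bit in a
 * guest PTE. The _notdirty store keeps the update from being mistaken
 * for guest-modified code. PTE_ACCESSED is hypothetical. */
#define PTE_ACCESSED 0x20
static void pte_set_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);
    stl_phys_notdirty(as, pte_addr, pte | PTE_ACCESSED);
}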
26748df1cd07Sbellard /* warning: addr must be aligned */
2675ab1da857SEdgar E. Iglesias static inline void stl_phys_internal(AddressSpace *as,
2676ab1da857SEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
26771e78bcc1SAlexander Graf                                      enum device_endian endian)
26788df1cd07Sbellard {
26798df1cd07Sbellard     uint8_t *ptr;
26805c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2681149f54b5SPaolo Bonzini     hwaddr l = 4;
2682149f54b5SPaolo Bonzini     hwaddr addr1;
26838df1cd07Sbellard 
2684ab1da857SEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l,
2685149f54b5SPaolo Bonzini                                  true);
26865c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
26871e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26881e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26891e78bcc1SAlexander Graf             val = bswap32(val);
26901e78bcc1SAlexander Graf         }
26911e78bcc1SAlexander Graf #else
26921e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26931e78bcc1SAlexander Graf             val = bswap32(val);
26941e78bcc1SAlexander Graf         }
26951e78bcc1SAlexander Graf #endif
26965c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
26978df1cd07Sbellard     } else {
26988df1cd07Sbellard         /* RAM case */
26995c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
27005579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
27011e78bcc1SAlexander Graf         switch (endian) {
27021e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
27031e78bcc1SAlexander Graf             stl_le_p(ptr, val);
27041e78bcc1SAlexander Graf             break;
27051e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
27061e78bcc1SAlexander Graf             stl_be_p(ptr, val);
27071e78bcc1SAlexander Graf             break;
27081e78bcc1SAlexander Graf         default:
27098df1cd07Sbellard             stl_p(ptr, val);
27101e78bcc1SAlexander Graf             break;
27111e78bcc1SAlexander Graf         }
271251d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
27138df1cd07Sbellard     }
27143a7d929eSbellard }
27158df1cd07Sbellard 
2716ab1da857SEdgar E. Iglesias void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27171e78bcc1SAlexander Graf {
2718ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
27191e78bcc1SAlexander Graf }
27201e78bcc1SAlexander Graf 
2721ab1da857SEdgar E. Iglesias void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27221e78bcc1SAlexander Graf {
2723ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
27241e78bcc1SAlexander Graf }
27251e78bcc1SAlexander Graf 
2726ab1da857SEdgar E. Iglesias void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27271e78bcc1SAlexander Graf {
2728ab1da857SEdgar E. Iglesias     stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
27291e78bcc1SAlexander Graf }
27301e78bcc1SAlexander Graf 
2731aab33094Sbellard /* XXX: optimize */
2732db3be60dSEdgar E. Iglesias void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2733aab33094Sbellard {
2734aab33094Sbellard     uint8_t v = val;
2735db3be60dSEdgar E. Iglesias     address_space_rw(as, addr, &v, 1, 1);
2736aab33094Sbellard }
2737aab33094Sbellard 
2738733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
27395ce5944dSEdgar E. Iglesias static inline void stw_phys_internal(AddressSpace *as,
27405ce5944dSEdgar E. Iglesias                                      hwaddr addr, uint32_t val,
27411e78bcc1SAlexander Graf                                      enum device_endian endian)
2742aab33094Sbellard {
2743733f0b02SMichael S. Tsirkin     uint8_t *ptr;
27445c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2745149f54b5SPaolo Bonzini     hwaddr l = 2;
2746149f54b5SPaolo Bonzini     hwaddr addr1;
2747733f0b02SMichael S. Tsirkin 
27485ce5944dSEdgar E. Iglesias     mr = address_space_translate(as, addr, &addr1, &l, true);
27495c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
27501e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
27511e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
27521e78bcc1SAlexander Graf             val = bswap16(val);
27531e78bcc1SAlexander Graf         }
27541e78bcc1SAlexander Graf #else
27551e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
27561e78bcc1SAlexander Graf             val = bswap16(val);
27571e78bcc1SAlexander Graf         }
27581e78bcc1SAlexander Graf #endif
27595c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2760733f0b02SMichael S. Tsirkin     } else {
2761733f0b02SMichael S. Tsirkin         /* RAM case */
27625c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2763733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
27641e78bcc1SAlexander Graf         switch (endian) {
27651e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
27661e78bcc1SAlexander Graf             stw_le_p(ptr, val);
27671e78bcc1SAlexander Graf             break;
27681e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
27691e78bcc1SAlexander Graf             stw_be_p(ptr, val);
27701e78bcc1SAlexander Graf             break;
27711e78bcc1SAlexander Graf         default:
2772733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
27731e78bcc1SAlexander Graf             break;
27741e78bcc1SAlexander Graf         }
277551d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2776733f0b02SMichael S. Tsirkin     }
2777aab33094Sbellard }
2778aab33094Sbellard 
27795ce5944dSEdgar E. Iglesias void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27801e78bcc1SAlexander Graf {
27815ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
27821e78bcc1SAlexander Graf }
27831e78bcc1SAlexander Graf 
27845ce5944dSEdgar E. Iglesias void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27851e78bcc1SAlexander Graf {
27865ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
27871e78bcc1SAlexander Graf }
27881e78bcc1SAlexander Graf 
27895ce5944dSEdgar E. Iglesias void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
27901e78bcc1SAlexander Graf {
27915ce5944dSEdgar E. Iglesias     stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
27921e78bcc1SAlexander Graf }
27931e78bcc1SAlexander Graf 
2794aab33094Sbellard /* XXX: optimize */
2795f606604fSEdgar E. Iglesias void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2796aab33094Sbellard {
2797aab33094Sbellard     val = tswap64(val);
2798f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
2799aab33094Sbellard }
2800aab33094Sbellard 
2801f606604fSEdgar E. Iglesias void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
28021e78bcc1SAlexander Graf {
28031e78bcc1SAlexander Graf     val = cpu_to_le64(val);
2804f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
28051e78bcc1SAlexander Graf }
28061e78bcc1SAlexander Graf 
2807f606604fSEdgar E. Iglesias void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
28081e78bcc1SAlexander Graf {
28091e78bcc1SAlexander Graf     val = cpu_to_be64(val);
2810f606604fSEdgar E. Iglesias     address_space_rw(as, addr, (void *) &val, 8, 1);
28111e78bcc1SAlexander Graf }
28121e78bcc1SAlexander Graf 
28135e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
2814f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2815b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
281613eb76e0Sbellard {
281713eb76e0Sbellard     int l;
2818a8170e5eSAvi Kivity     hwaddr phys_addr;
28199b3c35e0Sj_mayer     target_ulong page;
282013eb76e0Sbellard 
282113eb76e0Sbellard     while (len > 0) {
282213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
2823f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
282413eb76e0Sbellard         /* if no physical page mapped, return an error */
282513eb76e0Sbellard         if (phys_addr == -1)
282613eb76e0Sbellard             return -1;
282713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
282813eb76e0Sbellard         if (l > len)
282913eb76e0Sbellard             l = len;
28305e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
28312e38847bSEdgar E. Iglesias         if (is_write) {
28322e38847bSEdgar E. Iglesias             cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
28332e38847bSEdgar E. Iglesias         } else {
28342e38847bSEdgar E. Iglesias             address_space_rw(cpu->as, phys_addr, buf, l, 0);
28352e38847bSEdgar E. Iglesias         }
283613eb76e0Sbellard         len -= l;
283713eb76e0Sbellard         buf += l;
283813eb76e0Sbellard         addr += l;
283913eb76e0Sbellard     }
284013eb76e0Sbellard     return 0;
284113eb76e0Sbellard }
2842a68fe89cSPaul Brook #endif
284313eb76e0Sbellard 
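/* Editor's note: hedged sketch of the main consumer of
 * cpu_memory_rw_debug(): a gdb-style stub reading guest *virtual*
 * memory, with the page-table walk handled internally via
 * cpu_get_phys_page_debug(). debug_read_u32() is hypothetical. */
static int debug_read_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out,
                               sizeof(*out), 0);
}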
28448e4a424bSBlue Swirl /*
28458e4a424bSBlue Swirl  * A helper function for the _utterly broken_ virtio device model to find out if
28468e4a424bSBlue Swirl  * it's running on a big endian machine. Don't do this at home kids!
28478e4a424bSBlue Swirl  */
284898ed8ecfSGreg Kurz bool target_words_bigendian(void);
284998ed8ecfSGreg Kurz bool target_words_bigendian(void)
28508e4a424bSBlue Swirl {
28518e4a424bSBlue Swirl #if defined(TARGET_WORDS_BIGENDIAN)
28528e4a424bSBlue Swirl     return true;
28538e4a424bSBlue Swirl #else
28548e4a424bSBlue Swirl     return false;
28558e4a424bSBlue Swirl #endif
28568e4a424bSBlue Swirl }
28578e4a424bSBlue Swirl 
285876f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2859a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
286076f35538SWen Congyang {
28615c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2862149f54b5SPaolo Bonzini     hwaddr l = 1;
286376f35538SWen Congyang 
28645c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2865149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
286676f35538SWen Congyang 
28675c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
28685c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
286976f35538SWen Congyang }
2870bd2fa51fSMichael R. Hines 
2871bd2fa51fSMichael R. Hines void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2872bd2fa51fSMichael R. Hines {
2873bd2fa51fSMichael R. Hines     RAMBlock *block;
2874bd2fa51fSMichael R. Hines 
2875bd2fa51fSMichael R. Hines     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2876bd2fa51fSMichael R. Hines         func(block->host, block->offset, block->length, opaque);
2877bd2fa51fSMichael R. Hines     }
2878bd2fa51fSMichael R. Hines }
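/* Editor's note: hedged sketch of a RAMBlockIterFunc as migration code
 * (e.g. an RDMA transport) might use it: one callback per RAM block.
 * register_block() and the rdma_register() call are hypothetical. */
static void register_block(void *host, ram_addr_t offset,
                           ram_addr_t length, void *opaque)
{
    /* rdma_register(opaque, host, length);  -- hypothetical transport */
    (void)host; (void)offset; (void)length; (void)opaque;
}
/* usage: qemu_ram_foreach_block(register_block, rdma_state); */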
2879ec3f8c99SPeter Maydell #endif
2880