xref: /qemu/system/physmem.c (revision 026736cebfe0e4a96f0761a2bae62cca92ce2a4e)
154936004Sbellard /*
25b6dd868SBlue Swirl  *  Virtual page mapping
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard 
27055403b2SStefan Weil #include "qemu-common.h"
286180a181Sbellard #include "cpu.h"
29b67d9a52Sbellard #include "tcg.h"
30b3c7724cSpbrook #include "hw/hw.h"
31cc9e98cbSAlex Williamson #include "hw/qdev.h"
321de7afc9SPaolo Bonzini #include "qemu/osdep.h"
339c17d615SPaolo Bonzini #include "sysemu/kvm.h"
342ff3de68SMarkus Armbruster #include "sysemu/sysemu.h"
350d09e41aSPaolo Bonzini #include "hw/xen/xen.h"
361de7afc9SPaolo Bonzini #include "qemu/timer.h"
371de7afc9SPaolo Bonzini #include "qemu/config-file.h"
38022c62cbSPaolo Bonzini #include "exec/memory.h"
399c17d615SPaolo Bonzini #include "sysemu/dma.h"
40022c62cbSPaolo Bonzini #include "exec/address-spaces.h"
4153a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4253a5960aSpbrook #include <qemu.h>
43432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
449c17d615SPaolo Bonzini #include "sysemu/xen-mapcache.h"
456506e4f9SStefano Stabellini #include "trace.h"
4653a5960aSpbrook #endif
470d6d3c87SPaolo Bonzini #include "exec/cpu-all.h"
4854936004Sbellard 
49022c62cbSPaolo Bonzini #include "exec/cputlb.h"
505b6dd868SBlue Swirl #include "translate-all.h"
510cac1b66SBlue Swirl 
52022c62cbSPaolo Bonzini #include "exec/memory-internal.h"
5367d95c15SAvi Kivity 
54b35ba30fSMichael S. Tsirkin #include "qemu/range.h"
55b35ba30fSMichael S. Tsirkin 
56db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
571196be37Sths 
5899773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5974576198Saliguori static int in_migration;
6094a6b54fSpbrook 
61a3161038SPaolo Bonzini RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
6262152b8aSAvi Kivity 
6362152b8aSAvi Kivity static MemoryRegion *system_memory;
64309cb471SAvi Kivity static MemoryRegion *system_io;
6562152b8aSAvi Kivity 
66f6790af6SAvi Kivity AddressSpace address_space_io;
67f6790af6SAvi Kivity AddressSpace address_space_memory;
682673a5daSAvi Kivity 
690844e007SPaolo Bonzini MemoryRegion io_mem_rom, io_mem_notdirty;
70acc9d80bSJan Kiszka static MemoryRegion io_mem_unassigned;
710e0df1e2SAvi Kivity 
72e2eef170Spbrook #endif
739fa3e853Sbellard 
74bdc44640SAndreas Färber struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
756a00d601Sbellard /* current CPU in the current thread. It is only valid inside
766a00d601Sbellard    cpu_exec() */
774917cf44SAndreas Färber DEFINE_TLS(CPUState *, current_cpu);
782e70f6efSpbrook /* 0 = Do not count executed instructions.
79bf20dc07Sths    1 = Precise instruction counting.
802e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
815708fc66SPaolo Bonzini int use_icount;
826a00d601Sbellard 
83e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
844346ae3eSAvi Kivity 
typedef struct PhysPageEntry PhysPageEntry;

/* One 32-bit entry of the physical-address radix tree: an interior node
 * (skip != 0) pointing into the node array, or a leaf (skip == 0) pointing
 * at a MemoryRegionSection. */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};
931db8abb1SPaolo Bonzini 
948b795765SMichael S. Tsirkin #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
958b795765SMichael S. Tsirkin 
9603f49957SPaolo Bonzini /* Size of the L2 (and L3, etc) page tables.  */
9757271d63SPaolo Bonzini #define ADDR_SPACE_BITS 64
9803f49957SPaolo Bonzini 
99026736ceSMichael S. Tsirkin #define P_L2_BITS 9
10003f49957SPaolo Bonzini #define P_L2_SIZE (1 << P_L2_BITS)
10103f49957SPaolo Bonzini 
10203f49957SPaolo Bonzini #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
10303f49957SPaolo Bonzini 
10403f49957SPaolo Bonzini typedef PhysPageEntry Node[P_L2_SIZE];
1050475d94fSPaolo Bonzini 
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;         /* root of the radix tree */
    Node *nodes;                    /* backing array of interior nodes */
    MemoryRegionSection *sections;  /* leaf targets, indexed by entry ptr */
    AddressSpace *as;               /* address space this dispatch serves */
};
1151db8abb1SPaolo Bonzini 
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A guest page that is split among several sections: sub_section maps each
 * byte offset within the page to a section index in the dispatch table. */
typedef struct subpage_t {
    MemoryRegion iomem;   /* region installed in place of the page */
    AddressSpace *as;
    hwaddr base;          /* guest-physical base address of the page */
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
12390260c6cSJan Kiszka 
124b41aac4fSLiu Ping Fan #define PHYS_SECTION_UNASSIGNED 0
125b41aac4fSLiu Ping Fan #define PHYS_SECTION_NOTDIRTY 1
126b41aac4fSLiu Ping Fan #define PHYS_SECTION_ROM 2
127b41aac4fSLiu Ping Fan #define PHYS_SECTION_WATCH 3
1285312bd8bSAvi Kivity 
/* One generation of the physical page map: the node and section arrays
 * together with their used/allocated element counts. */
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

/* next_map is the generation currently being built; prev_map presumably
 * keeps the previous generation alive until the switch-over completes —
 * its consumer is not visible in this chunk (NOTE(review): confirm). */
static PhysPageMap *prev_map;
static PhysPageMap next_map;
140d6f2ea22SAvi Kivity 
141e2eef170Spbrook static void io_mem_init(void);
14262152b8aSAvi Kivity static void memory_map_init(void);
143e2eef170Spbrook 
1441ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
1456658ffb8Spbrook #endif
14654936004Sbellard 
1476d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
148d6f2ea22SAvi Kivity 
149f7bf5461SAvi Kivity static void phys_map_node_reserve(unsigned nodes)
150f7bf5461SAvi Kivity {
1519affd6fcSPaolo Bonzini     if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
1529affd6fcSPaolo Bonzini         next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
1539affd6fcSPaolo Bonzini                                             16);
1549affd6fcSPaolo Bonzini         next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
1559affd6fcSPaolo Bonzini                                       next_map.nodes_nb + nodes);
1569affd6fcSPaolo Bonzini         next_map.nodes = g_renew(Node, next_map.nodes,
1579affd6fcSPaolo Bonzini                                  next_map.nodes_nb_alloc);
158f7bf5461SAvi Kivity     }
159f7bf5461SAvi Kivity }
160f7bf5461SAvi Kivity 
1618b795765SMichael S. Tsirkin static uint32_t phys_map_node_alloc(void)
162d6f2ea22SAvi Kivity {
163d6f2ea22SAvi Kivity     unsigned i;
1648b795765SMichael S. Tsirkin     uint32_t ret;
165d6f2ea22SAvi Kivity 
1669affd6fcSPaolo Bonzini     ret = next_map.nodes_nb++;
167d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
1689affd6fcSPaolo Bonzini     assert(ret != next_map.nodes_nb_alloc);
16903f49957SPaolo Bonzini     for (i = 0; i < P_L2_SIZE; ++i) {
1709736e55bSMichael S. Tsirkin         next_map.nodes[ret][i].skip = 1;
1719affd6fcSPaolo Bonzini         next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
172d6f2ea22SAvi Kivity     }
173f7bf5461SAvi Kivity     return ret;
174d6f2ea22SAvi Kivity }
175d6f2ea22SAvi Kivity 
/* Recursively populate one level of the radix page table.
 *
 * [*index, *index + *nb) is the range of page indexes still to be mapped
 * to section @leaf; both are advanced/consumed as entries are written.
 * @level counts down from P_L2_LEVELS - 1 to 0 (the leaf level).
 */
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    /* Allocate the child node on first use; a fresh bottom-level node has
     * all of its leaves set to PHYS_SECTION_UNASSIGNED. */
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    /* Entry within this node where the remaining range starts. */
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Range covers this entire subtree: turn lp into a leaf. */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: descend one level. */
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
2105cd2c5b6SRichard Henderson 
211ac1970fbSAvi Kivity static void phys_page_set(AddressSpaceDispatch *d,
212a8170e5eSAvi Kivity                           hwaddr index, hwaddr nb,
2132999097bSAvi Kivity                           uint16_t leaf)
214f7bf5461SAvi Kivity {
2152999097bSAvi Kivity     /* Wildly overreserve - it doesn't matter much. */
21607f07b31SAvi Kivity     phys_map_node_reserve(3 * P_L2_LEVELS);
217f7bf5461SAvi Kivity 
218ac1970fbSAvi Kivity     phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
21992e873b9Sbellard }
22092e873b9Sbellard 
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    /* NOTE(review): @compacted is threaded through the recursion but never
     * read or written here — presumably reserved for visit tracking; confirm
     * before relying on its contents. */
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    /* Bottom-up: compact every populated non-leaf child first, counting the
     * populated children and remembering the last one seen. */
    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    /* NOTE(review): skip is declared as a 6-bit field but this guard uses
     * (1 << 3); that is conservative (refuses some legal compressions), not
     * unsafe — confirm the intended limit. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    /* Splice the single child out: point directly at its destination and
     * accumulate the skipped levels. */
    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
273b35ba30fSMichael S. Tsirkin 
/* Compact the whole radix tree of dispatch table @d (no-op if the root is
 * already a leaf). */
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    /* NOTE(review): bitmap is passed down uninitialized and never used by
     * phys_page_compact in this file — verify intent. */
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->nodes, compacted);
    }
}
282b35ba30fSMichael S. Tsirkin 
/* Walk the radix tree and return the MemoryRegionSection covering @addr,
 * or the PHYS_SECTION_UNASSIGNED sentinel if nothing maps it. */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* Each step consumes lp.skip levels (compacted chains have skip > 1). */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* After compression a leaf may nominally cover more than its section;
     * double-check that addr really falls inside before returning it. */
    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
306f3705d53SAvi Kivity 
307e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
308e5548617SBlue Swirl {
3092a8e7499SPaolo Bonzini     return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
310e5548617SBlue Swirl         && mr != &io_mem_watch;
311e5548617SBlue Swirl }
312149f54b5SPaolo Bonzini 
313c7086b4aSPaolo Bonzini static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
31490260c6cSJan Kiszka                                                         hwaddr addr,
31590260c6cSJan Kiszka                                                         bool resolve_subpage)
3169f029603SJan Kiszka {
31790260c6cSJan Kiszka     MemoryRegionSection *section;
31890260c6cSJan Kiszka     subpage_t *subpage;
31990260c6cSJan Kiszka 
32097115a8dSMichael S. Tsirkin     section = phys_page_find(d->phys_map, addr, d->nodes, d->sections);
32190260c6cSJan Kiszka     if (resolve_subpage && section->mr->subpage) {
32290260c6cSJan Kiszka         subpage = container_of(section->mr, subpage_t, iomem);
3230475d94fSPaolo Bonzini         section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
32490260c6cSJan Kiszka     }
32590260c6cSJan Kiszka     return section;
3269f029603SJan Kiszka }
3279f029603SJan Kiszka 
32890260c6cSJan Kiszka static MemoryRegionSection *
329c7086b4aSPaolo Bonzini address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
33090260c6cSJan Kiszka                                  hwaddr *plen, bool resolve_subpage)
331149f54b5SPaolo Bonzini {
332149f54b5SPaolo Bonzini     MemoryRegionSection *section;
333149f54b5SPaolo Bonzini     Int128 diff;
334149f54b5SPaolo Bonzini 
335c7086b4aSPaolo Bonzini     section = address_space_lookup_region(d, addr, resolve_subpage);
336149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegionSection */
337149f54b5SPaolo Bonzini     addr -= section->offset_within_address_space;
338149f54b5SPaolo Bonzini 
339149f54b5SPaolo Bonzini     /* Compute offset within MemoryRegion */
340149f54b5SPaolo Bonzini     *xlat = addr + section->offset_within_region;
341149f54b5SPaolo Bonzini 
342149f54b5SPaolo Bonzini     diff = int128_sub(section->mr->size, int128_make64(addr));
3433752a036SPeter Maydell     *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
344149f54b5SPaolo Bonzini     return section;
345149f54b5SPaolo Bonzini }
34690260c6cSJan Kiszka 
/* Resolve @addr in @as down to a terminal MemoryRegion, following IOMMU
 * regions as needed.  On return *xlat is the offset within the returned
 * region and *plen is clamped to the contiguous remainder; a failed IOMMU
 * permission check yields io_mem_unassigned. */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        /* Plain memory: translation is complete. */
        if (!mr->iommu_ops) {
            break;
        }

        /* Translate through the IOMMU, keeping the in-page offset, and
         * clamp len to the IOTLB entry's page. */
        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        /* perm bit 0 = read allowed, bit 1 = write allowed. */
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        /* Continue the walk in the IOMMU's target address space. */
        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
38090260c6cSJan Kiszka 
38190260c6cSJan Kiszka MemoryRegionSection *
38290260c6cSJan Kiszka address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
38390260c6cSJan Kiszka                                   hwaddr *plen)
38490260c6cSJan Kiszka {
38530951157SAvi Kivity     MemoryRegionSection *section;
386c7086b4aSPaolo Bonzini     section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
38730951157SAvi Kivity 
38830951157SAvi Kivity     assert(!section->mr->iommu_ops);
38930951157SAvi Kivity     return section;
39090260c6cSJan Kiszka }
3919fa3e853Sbellard #endif
392fd6ce8f6Sbellard 
393d5ab9713SJan Kiszka void cpu_exec_init_all(void)
394d5ab9713SJan Kiszka {
395d5ab9713SJan Kiszka #if !defined(CONFIG_USER_ONLY)
396b2a8658eSUmesh Deshpande     qemu_mutex_init(&ram_list.mutex);
397d5ab9713SJan Kiszka     memory_map_init();
398d5ab9713SJan Kiszka     io_mem_init();
399d5ab9713SJan Kiszka #endif
400d5ab9713SJan Kiszka }
401d5ab9713SJan Kiszka 
402b170fce3SAndreas Färber #if !defined(CONFIG_USER_ONLY)
4039656f324Spbrook 
/* post_load hook of vmstate_cpu_common: sanitize migrated state.
 * @version_id is unused (only version 1 exists). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    /* Cached TLB contents are meaningless after migration: flush all. */
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}
415e7f4eff7SJuan Quintela 
/* Migration description for the state shared by every CPU model: only the
 * halted flag and the pending interrupt_request mask are transferred. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
4281a1562f5SAndreas Färber 
4299656f324Spbrook #endif
4309656f324Spbrook 
43138d8f5c8SAndreas Färber CPUState *qemu_get_cpu(int index)
432950f1472SGlauber Costa {
433bdc44640SAndreas Färber     CPUState *cpu;
434950f1472SGlauber Costa 
435bdc44640SAndreas Färber     CPU_FOREACH(cpu) {
43655e5c285SAndreas Färber         if (cpu->cpu_index == index) {
437bdc44640SAndreas Färber             return cpu;
43855e5c285SAndreas Färber         }
439950f1472SGlauber Costa     }
440950f1472SGlauber Costa 
441bdc44640SAndreas Färber     return NULL;
442950f1472SGlauber Costa }
443950f1472SGlauber Costa 
/* Register a freshly created CPU: assign it the next cpu_index, link it
 * into the global cpus list, and hook up migration state (vmstate and/or
 * the legacy cpu_save/cpu_load pair). */
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* cpu_index = number of CPUs registered so far. */
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    /* Register the common vmstate only if the device model does not carry
     * a vmsd of its own. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Legacy migration path; asserted mutually exclusive with vmsd. */
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
482fd6ce8f6Sbellard 
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Discard cached translations containing @pc so a new breakpoint takes
 * effect.  In user mode pc can be used as a physical page address. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: the virtual pc must be translated first; an unmapped pc
 * (debug translation returned -1) means there is nothing to invalidate. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);

    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */
499d720b93dSbellard 
500c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
5019349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
502c527ee8fSPaul Brook 
503c527ee8fSPaul Brook {
504c527ee8fSPaul Brook }
505c527ee8fSPaul Brook 
5069349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
507c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
508c527ee8fSPaul Brook {
509c527ee8fSPaul Brook     return -ENOSYS;
510c527ee8fSPaul Brook }
511c527ee8fSPaul Brook #else
5126658ffb8Spbrook /* Add a watchpoint.  */
5139349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
514a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
5156658ffb8Spbrook {
516b4051334Saliguori     target_ulong len_mask = ~(len - 1);
517c0ce998eSaliguori     CPUWatchpoint *wp;
5186658ffb8Spbrook 
519b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
5200dc23828SMax Filippov     if ((len & (len - 1)) || (addr & ~len_mask) ||
5210dc23828SMax Filippov             len == 0 || len > TARGET_PAGE_SIZE) {
522b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
523b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
524b4051334Saliguori         return -EINVAL;
525b4051334Saliguori     }
5267267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
5276658ffb8Spbrook 
528a1d1bb31Saliguori     wp->vaddr = addr;
529b4051334Saliguori     wp->len_mask = len_mask;
530a1d1bb31Saliguori     wp->flags = flags;
531a1d1bb31Saliguori 
5322dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
533c0ce998eSaliguori     if (flags & BP_GDB)
53472cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
535c0ce998eSaliguori     else
53672cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
537a1d1bb31Saliguori 
5386658ffb8Spbrook     tlb_flush_page(env, addr);
539a1d1bb31Saliguori 
540a1d1bb31Saliguori     if (watchpoint)
541a1d1bb31Saliguori         *watchpoint = wp;
542a1d1bb31Saliguori     return 0;
5436658ffb8Spbrook }
5446658ffb8Spbrook 
545a1d1bb31Saliguori /* Remove a specific watchpoint.  */
5469349b4f9SAndreas Färber int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
547a1d1bb31Saliguori                           int flags)
5486658ffb8Spbrook {
549b4051334Saliguori     target_ulong len_mask = ~(len - 1);
550a1d1bb31Saliguori     CPUWatchpoint *wp;
5516658ffb8Spbrook 
55272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
553b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
5546e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
555a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
5566658ffb8Spbrook             return 0;
5576658ffb8Spbrook         }
5586658ffb8Spbrook     }
559a1d1bb31Saliguori     return -ENOENT;
5606658ffb8Spbrook }
5616658ffb8Spbrook 
562a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
5639349b4f9SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
564a1d1bb31Saliguori {
56572cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
5667d03f82fSedgar_igl 
567a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
568a1d1bb31Saliguori 
5697267c094SAnthony Liguori     g_free(watchpoint);
5707d03f82fSedgar_igl }
5717d03f82fSedgar_igl 
572a1d1bb31Saliguori /* Remove all matching watchpoints.  */
5739349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
574a1d1bb31Saliguori {
575c0ce998eSaliguori     CPUWatchpoint *wp, *next;
576a1d1bb31Saliguori 
57772cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
578a1d1bb31Saliguori         if (wp->flags & mask)
579a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
580a1d1bb31Saliguori     }
581c0ce998eSaliguori }
582c527ee8fSPaul Brook #endif
583a1d1bb31Saliguori 
584a1d1bb31Saliguori /* Add a breakpoint.  */
5859349b4f9SAndreas Färber int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
586a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
5874c3a88a2Sbellard {
5881fddef4bSbellard #if defined(TARGET_HAS_ICE)
589c0ce998eSaliguori     CPUBreakpoint *bp;
5904c3a88a2Sbellard 
5917267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
5924c3a88a2Sbellard 
593a1d1bb31Saliguori     bp->pc = pc;
594a1d1bb31Saliguori     bp->flags = flags;
595a1d1bb31Saliguori 
5962dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
59700b941e5SAndreas Färber     if (flags & BP_GDB) {
59872cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
59900b941e5SAndreas Färber     } else {
60072cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
60100b941e5SAndreas Färber     }
602d720b93dSbellard 
60300b941e5SAndreas Färber     breakpoint_invalidate(ENV_GET_CPU(env), pc);
604a1d1bb31Saliguori 
60500b941e5SAndreas Färber     if (breakpoint) {
606a1d1bb31Saliguori         *breakpoint = bp;
60700b941e5SAndreas Färber     }
6084c3a88a2Sbellard     return 0;
6094c3a88a2Sbellard #else
610a1d1bb31Saliguori     return -ENOSYS;
6114c3a88a2Sbellard #endif
6124c3a88a2Sbellard }
6134c3a88a2Sbellard 
614a1d1bb31Saliguori /* Remove a specific breakpoint.  */
6159349b4f9SAndreas Färber int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
616a1d1bb31Saliguori {
6177d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
618a1d1bb31Saliguori     CPUBreakpoint *bp;
619a1d1bb31Saliguori 
62072cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
621a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
622a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
623a1d1bb31Saliguori             return 0;
6247d03f82fSedgar_igl         }
625a1d1bb31Saliguori     }
626a1d1bb31Saliguori     return -ENOENT;
627a1d1bb31Saliguori #else
628a1d1bb31Saliguori     return -ENOSYS;
6297d03f82fSedgar_igl #endif
6307d03f82fSedgar_igl }
6317d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.
 *
 * Unlinks @breakpoint from @env's list, invalidates any cached
 * translation for its pc (see breakpoint_invalidate) and frees it.
 * @breakpoint must be on @env's breakpoint list.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}
643a1d1bb31Saliguori 
644a1d1bb31Saliguori /* Remove all matching breakpoints. */
6459349b4f9SAndreas Färber void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
646a1d1bb31Saliguori {
647a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
648c0ce998eSaliguori     CPUBreakpoint *bp, *next;
649a1d1bb31Saliguori 
65072cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
651a1d1bb31Saliguori         if (bp->flags & mask)
652a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
653c0ce998eSaliguori     }
6544c3a88a2Sbellard #endif
6554c3a88a2Sbellard }
6564c3a88a2Sbellard 
657c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
658c33a346eSbellard    CPU loop after each instruction */
6593825b28fSAndreas Färber void cpu_single_step(CPUState *cpu, int enabled)
660c33a346eSbellard {
6611fddef4bSbellard #if defined(TARGET_HAS_ICE)
662ed2803daSAndreas Färber     if (cpu->singlestep_enabled != enabled) {
663ed2803daSAndreas Färber         cpu->singlestep_enabled = enabled;
664ed2803daSAndreas Färber         if (kvm_enabled()) {
66538e478ecSStefan Weil             kvm_update_guest_debug(cpu, 0);
666ed2803daSAndreas Färber         } else {
667ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
6689fa3e853Sbellard             /* XXX: only flush what is necessary */
66938e478ecSStefan Weil             CPUArchState *env = cpu->env_ptr;
6700124311eSbellard             tb_flush(env);
671c33a346eSbellard         }
672e22a25c9Saliguori     }
673c33a346eSbellard #endif
674c33a346eSbellard }
675c33a346eSbellard 
/* Report a fatal emulation error: print the formatted message and the
 * CPU state to stderr (and to the qemu log, if enabled), then abort().
 * Never returns.  */
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by vfprintf below; ap2 is a fresh copy for the
       second formatting pass into the log.  */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so abort() really
           terminates us -- presumably the guest may have installed its
           own handler through the emulated signal machinery.  */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
7087501267eSbellard 
7090124311eSbellard #if !defined(CONFIG_USER_ONLY)
710041603feSPaolo Bonzini static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
711041603feSPaolo Bonzini {
712041603feSPaolo Bonzini     RAMBlock *block;
713041603feSPaolo Bonzini 
714041603feSPaolo Bonzini     /* The list is protected by the iothread lock here.  */
715041603feSPaolo Bonzini     block = ram_list.mru_block;
716041603feSPaolo Bonzini     if (block && addr - block->offset < block->length) {
717041603feSPaolo Bonzini         goto found;
718041603feSPaolo Bonzini     }
719041603feSPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
720041603feSPaolo Bonzini         if (addr - block->offset < block->length) {
721041603feSPaolo Bonzini             goto found;
722041603feSPaolo Bonzini         }
723041603feSPaolo Bonzini     }
724041603feSPaolo Bonzini 
725041603feSPaolo Bonzini     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
726041603feSPaolo Bonzini     abort();
727041603feSPaolo Bonzini 
728041603feSPaolo Bonzini found:
729041603feSPaolo Bonzini     ram_list.mru_block = block;
730041603feSPaolo Bonzini     return block;
731041603feSPaolo Bonzini }
732041603feSPaolo Bonzini 
/* Translate the guest RAM range [start, start+length) to a host
 * virtual address and reset the dirty flag in all CPUs' TLB entries
 * covering it.  start and end-1 must lie in the same RAMBlock
 * (asserted below).  */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    /* Convert the ram_addr_t offset into a host pointer.  */
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}
744d24981d3SJuan Quintela 
745d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
746d24981d3SJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
747d24981d3SJuan Quintela                                      int dirty_flags)
748d24981d3SJuan Quintela {
749d24981d3SJuan Quintela     uintptr_t length;
750d24981d3SJuan Quintela 
751d24981d3SJuan Quintela     start &= TARGET_PAGE_MASK;
752d24981d3SJuan Quintela     end = TARGET_PAGE_ALIGN(end);
753d24981d3SJuan Quintela 
754d24981d3SJuan Quintela     length = end - start;
755d24981d3SJuan Quintela     if (length == 0)
756d24981d3SJuan Quintela         return;
757d24981d3SJuan Quintela     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
758d24981d3SJuan Quintela 
759d24981d3SJuan Quintela     if (tcg_enabled()) {
760d24981d3SJuan Quintela         tlb_reset_dirty_range_all(start, end, length);
761d24981d3SJuan Quintela     }
7621ccde1cbSbellard }
7631ccde1cbSbellard 
7648b9c99d9SBlue Swirl static int cpu_physical_memory_set_dirty_tracking(int enable)
76574576198Saliguori {
766f6f3fbcaSMichael S. Tsirkin     int ret = 0;
76774576198Saliguori     in_migration = enable;
768f6f3fbcaSMichael S. Tsirkin     return ret;
76974576198Saliguori }
77074576198Saliguori 
/* Compute the iotlb value for a softmmu TLB entry mapping @vaddr.
 *
 * For RAM sections the value is the page's ram_addr ORed with the
 * NOTDIRTY or ROM section number so that writes are intercepted; for
 * non-RAM (MMIO) it is the section's index in the current dispatch
 * table plus @xlat.  If a watchpoint covers the page, the entry is
 * redirected to the watchpoint section and *address gets TLB_MMIO set
 * so every access traps.  */
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        /* MMIO: encode the section index within address_space_memory.  */
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
8109fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
81133417e70Sbellard 
812e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
8138da3ff18Spbrook 
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

/* Allocator used for guest RAM; defaults to plain anonymous memory.  */
static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
82991138037SMarkus Armbruster 
/* Append *section to the global next_map section table (growing it
 * geometrically as needed) and return its index.  Takes a reference
 * on section->mr; phys_section_destroy() drops it.  */
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        /* Double the capacity, starting from 16 entries.  */
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}
8485312bd8bSAvi Kivity 
849058bc4b5SPaolo Bonzini static void phys_section_destroy(MemoryRegion *mr)
850058bc4b5SPaolo Bonzini {
851dfde4e6eSPaolo Bonzini     memory_region_unref(mr);
852dfde4e6eSPaolo Bonzini 
853058bc4b5SPaolo Bonzini     if (mr->subpage) {
854058bc4b5SPaolo Bonzini         subpage_t *subpage = container_of(mr, subpage_t, iomem);
855058bc4b5SPaolo Bonzini         memory_region_destroy(&subpage->iomem);
856058bc4b5SPaolo Bonzini         g_free(subpage);
857058bc4b5SPaolo Bonzini     }
858058bc4b5SPaolo Bonzini }
859058bc4b5SPaolo Bonzini 
8606092666eSPaolo Bonzini static void phys_sections_free(PhysPageMap *map)
8615312bd8bSAvi Kivity {
8629affd6fcSPaolo Bonzini     while (map->sections_nb > 0) {
8639affd6fcSPaolo Bonzini         MemoryRegionSection *section = &map->sections[--map->sections_nb];
864058bc4b5SPaolo Bonzini         phys_section_destroy(section->mr);
865058bc4b5SPaolo Bonzini     }
8669affd6fcSPaolo Bonzini     g_free(map->sections);
8679affd6fcSPaolo Bonzini     g_free(map->nodes);
8686092666eSPaolo Bonzini     g_free(map);
8695312bd8bSAvi Kivity }
8705312bd8bSAvi Kivity 
/* Register a MemoryRegionSection that does not fill a whole aligned
 * page: route it through the subpage_t container for the page at its
 * base address, creating and mapping that container on first use.  */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   next_map.nodes, next_map.sections);
    /* Section describing the full page that hosts the subpage region.  */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must be either unassigned or already a subpage.  */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First subpage on this page: create the container and map it.  */
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Register the intra-page byte range covered by *section.  */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
8980f0cb164SAvi Kivity 
8990f0cb164SAvi Kivity 
900052e87b0SPaolo Bonzini static void register_multipage(AddressSpaceDispatch *d,
901052e87b0SPaolo Bonzini                                MemoryRegionSection *section)
90233417e70Sbellard {
903a8170e5eSAvi Kivity     hwaddr start_addr = section->offset_within_address_space;
9045312bd8bSAvi Kivity     uint16_t section_index = phys_section_add(section);
905052e87b0SPaolo Bonzini     uint64_t num_pages = int128_get64(int128_rshift(section->size,
906052e87b0SPaolo Bonzini                                                     TARGET_PAGE_BITS));
907dd81124bSAvi Kivity 
908733d5ef5SPaolo Bonzini     assert(num_pages);
909733d5ef5SPaolo Bonzini     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
91033417e70Sbellard }
91133417e70Sbellard 
/* MemoryListener "region_add" callback: insert *section into the
 * AddressSpaceDispatch under construction.  Aligned page-multiple runs
 * go straight into the radix tree; unaligned head/tail fragments and
 * sub-page pieces are routed through subpages.  */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    /* `now` is the chunk being registered; `remain` tracks what is left.  */
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned start: register the fragment up to the next page
           boundary as a subpage.  */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Loop terminates when now consumed everything left in remain.  */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Sub-page tail.  */
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            /* Still unaligned: emit exactly one page as a subpage.  */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Aligned: register the largest whole-page run at once.  */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
9440f0cb164SAvi Kivity 
/* Flush any MMIO writes KVM has batched in its coalesced-MMIO ring.
 * No-op when KVM is not in use.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
95062a2744cSSheng Yang 
/* Acquire the global mutex guarding ram_list.  */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
955b2a8658eSUmesh Deshpande 
/* Release the global mutex guarding ram_list.  */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
960b2a8658eSUmesh Deshpande 
961e1e84ba0SMarkus Armbruster #ifdef __linux__
962c902760fSMarcelo Tosatti 
963c902760fSMarcelo Tosatti #include <sys/vfs.h>
964c902760fSMarcelo Tosatti 
965c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
966c902760fSMarcelo Tosatti 
967c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
968c902760fSMarcelo Tosatti {
969c902760fSMarcelo Tosatti     struct statfs fs;
970c902760fSMarcelo Tosatti     int ret;
971c902760fSMarcelo Tosatti 
972c902760fSMarcelo Tosatti     do {
973c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
974c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
975c902760fSMarcelo Tosatti 
976c902760fSMarcelo Tosatti     if (ret != 0) {
9776adc0549SMichael Tokarev         perror(path);
978c902760fSMarcelo Tosatti         return 0;
979c902760fSMarcelo Tosatti     }
980c902760fSMarcelo Tosatti 
981c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
982c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
983c902760fSMarcelo Tosatti 
984c902760fSMarcelo Tosatti     return fs.f_bsize;
985c902760fSMarcelo Tosatti }
986c902760fSMarcelo Tosatti 
/* Jump target for the SIGBUS trap below.  */
static sigjmp_buf sigjump;

/* SIGBUS handler installed during hugepage preallocation: jump back
 * to the sigsetjmp() site in file_ram_alloc() to report failure.  */
static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}
993ef36fa14SMarcelo Tosatti 
/* Back @block with an unlinked temp file created under @path (normally
 * a hugetlbfs mount).  On success returns the mmap()ed area and stores
 * the fd in block->fd; returns NULL on failure so the caller can fall
 * back to a regular allocation.  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A region smaller than one huge page cannot be backed this way.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    /* Unlink right away: the fd keeps the file alive, and the backing
       store disappears automatically when the fd is closed.  */
    unlink(filename);
    g_free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        /* Touch each page below to force allocation now.  SIGBUS fires
           if the hugepage pool is exhausted, so trap it temporarily and
           bail out through siglongjmp.  */
        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        /* NOTE(review): the loop runs to (memory/hpagesize)-1, so the
           last huge page is never touched -- looks like an off-by-one;
           verify the intent before changing.  */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
1099e1e84ba0SMarkus Armbruster #else
/* Non-Linux stub: -mem-path requires hugetlbfs support, so report the
 * limitation and exit.  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
1107c902760fSMarcelo Tosatti #endif
1108c902760fSMarcelo Tosatti 
1109d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
1110d17b5288SAlex Williamson {
111104b16653SAlex Williamson     RAMBlock *block, *next_block;
11123e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
111304b16653SAlex Williamson 
111449cd9ac6SStefan Hajnoczi     assert(size != 0); /* it would hand out same offset multiple times */
111549cd9ac6SStefan Hajnoczi 
1116a3161038SPaolo Bonzini     if (QTAILQ_EMPTY(&ram_list.blocks))
111704b16653SAlex Williamson         return 0;
111804b16653SAlex Williamson 
1119a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1120f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
112104b16653SAlex Williamson 
112204b16653SAlex Williamson         end = block->offset + block->length;
112304b16653SAlex Williamson 
1124a3161038SPaolo Bonzini         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
112504b16653SAlex Williamson             if (next_block->offset >= end) {
112604b16653SAlex Williamson                 next = MIN(next, next_block->offset);
112704b16653SAlex Williamson             }
112804b16653SAlex Williamson         }
112904b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
113004b16653SAlex Williamson             offset = end;
113104b16653SAlex Williamson             mingap = next - end;
113204b16653SAlex Williamson         }
113304b16653SAlex Williamson     }
11343e837b2cSAlex Williamson 
11353e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
11363e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
11373e837b2cSAlex Williamson                 (uint64_t)size);
11383e837b2cSAlex Williamson         abort();
11393e837b2cSAlex Williamson     }
11403e837b2cSAlex Williamson 
114104b16653SAlex Williamson     return offset;
114204b16653SAlex Williamson }
114304b16653SAlex Williamson 
1144652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
114504b16653SAlex Williamson {
1146d17b5288SAlex Williamson     RAMBlock *block;
1147d17b5288SAlex Williamson     ram_addr_t last = 0;
1148d17b5288SAlex Williamson 
1149a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1150d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
1151d17b5288SAlex Williamson 
1152d17b5288SAlex Williamson     return last;
1153d17b5288SAlex Williamson }
1154d17b5288SAlex Williamson 
1155ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1156ddb97f1dSJason Baron {
1157ddb97f1dSJason Baron     int ret;
1158ddb97f1dSJason Baron 
1159ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
11602ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
11612ff3de68SMarkus Armbruster                            "dump-guest-core", true)) {
1162ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1163ddb97f1dSJason Baron         if (ret) {
1164ddb97f1dSJason Baron             perror("qemu_madvise");
1165ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1166ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
1167ddb97f1dSJason Baron         }
1168ddb97f1dSJason Baron     }
1169ddb97f1dSJason Baron }
1170ddb97f1dSJason Baron 
/* Assign the identifier string "<dev path>/<name>" (or just "<name>")
 * to the RAMBlock starting at @addr.  The block must exist and must
 * not already have an idstr; aborts if another block carries the same
 * idstr, since duplicates would be ambiguous.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    /* Locate the block whose offset matches @addr exactly.  */
    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        /* Prefix with the qdev path to disambiguate same-named regions.  */
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
1205c5705a77SAvi Kivity 
12068490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
12078490fc78SLuiz Capitulino {
12082ff3de68SMarkus Armbruster     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
12098490fc78SLuiz Capitulino         /* disabled by the user */
12108490fc78SLuiz Capitulino         return 0;
12118490fc78SLuiz Capitulino     }
12128490fc78SLuiz Capitulino 
12138490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
12148490fc78SLuiz Capitulino }
12158490fc78SLuiz Capitulino 
/*
 * Allocate a new RAMBlock of @size bytes (rounded up to the target page
 * size) for memory region @mr and register it in the global ram_list.
 *
 * @host: if non-NULL, use this caller-provided buffer as backing storage;
 *        the block is flagged RAM_PREALLOC_MASK so qemu_ram_free() will
 *        not try to release the memory.  If NULL, backing memory comes
 *        from Xen, -mem-path, or phys_mem_alloc, in that order.
 *
 * Returns the ram_addr_t offset of the new block.  Exits the process if
 * backing memory cannot be allocated.  Assumes the iothread lock is held.
 */
1216c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1217c5705a77SAvi Kivity                                    MemoryRegion *mr)
1218c5705a77SAvi Kivity {
1219abb26d63SPaolo Bonzini     RAMBlock *block, *new_block;
1220c5705a77SAvi Kivity 
1221c5705a77SAvi Kivity     size = TARGET_PAGE_ALIGN(size);
1222c5705a77SAvi Kivity     new_block = g_malloc0(sizeof(*new_block));
    /* -1 means "not backed by a file descriptor" (see qemu_ram_free()). */
12233435f395SMarkus Armbruster     new_block->fd = -1;
122384b89d78SCam Macdonell 
1225b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1226b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
12277c637366SAvi Kivity     new_block->mr = mr;
1228432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
12296977dfe6SYoshiaki Tamura     if (host) {
        /* Caller-provided backing: record it and mark as preallocated. */
123084b89d78SCam Macdonell         new_block->host = host;
1231cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
1232dfeaf2abSMarkus Armbruster     } else if (xen_enabled()) {
1233dfeaf2abSMarkus Armbruster         if (mem_path) {
1234dfeaf2abSMarkus Armbruster             fprintf(stderr, "-mem-path not supported with Xen\n");
1235dfeaf2abSMarkus Armbruster             exit(1);
1236dfeaf2abSMarkus Armbruster         }
        /* Xen allocates guest RAM itself; block->host stays NULL and is
         * mapped lazily via the map cache (see qemu_get_ram_ptr()). */
1237dfeaf2abSMarkus Armbruster         xen_ram_alloc(new_block->offset, size, mr);
12386977dfe6SYoshiaki Tamura     } else {
1239c902760fSMarcelo Tosatti         if (mem_path) {
1240e1e84ba0SMarkus Armbruster             if (phys_mem_alloc != qemu_anon_ram_alloc) {
1241e1e84ba0SMarkus Armbruster                 /*
1242e1e84ba0SMarkus Armbruster                  * file_ram_alloc() needs to allocate just like
1243e1e84ba0SMarkus Armbruster                  * phys_mem_alloc, but we haven't bothered to provide
1244e1e84ba0SMarkus Armbruster                  * a hook there.
1245e1e84ba0SMarkus Armbruster                  */
1246e1e84ba0SMarkus Armbruster                 fprintf(stderr,
1247e1e84ba0SMarkus Armbruster                         "-mem-path not supported with this accelerator\n");
1248c902760fSMarcelo Tosatti                 exit(1);
1249e1e84ba0SMarkus Armbruster             }
1250e1e84ba0SMarkus Armbruster             new_block->host = file_ram_alloc(new_block, size, mem_path);
12510628c182SMarkus Armbruster         }
        /* Fall back to anonymous memory if file_ram_alloc() failed or
         * -mem-path was not given. */
12520628c182SMarkus Armbruster         if (!new_block->host) {
125391138037SMarkus Armbruster             new_block->host = phys_mem_alloc(size);
125439228250SMarkus Armbruster             if (!new_block->host) {
125539228250SMarkus Armbruster                 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
125639228250SMarkus Armbruster                         new_block->mr->name, strerror(errno));
125739228250SMarkus Armbruster                 exit(1);
125839228250SMarkus Armbruster             }
12598490fc78SLuiz Capitulino             memory_try_enable_merging(new_block->host, size);
1260c902760fSMarcelo Tosatti         }
12616977dfe6SYoshiaki Tamura     }
126294a6b54fSpbrook     new_block->length = size;
126394a6b54fSpbrook 
1264abb26d63SPaolo Bonzini     /* Keep the list sorted from biggest to smallest block.  */
1265abb26d63SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1266abb26d63SPaolo Bonzini         if (block->length < new_block->length) {
1267abb26d63SPaolo Bonzini             break;
1268abb26d63SPaolo Bonzini         }
1269abb26d63SPaolo Bonzini     }
1270abb26d63SPaolo Bonzini     if (block) {
1271abb26d63SPaolo Bonzini         QTAILQ_INSERT_BEFORE(block, new_block, next);
1272abb26d63SPaolo Bonzini     } else {
1273abb26d63SPaolo Bonzini         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1274abb26d63SPaolo Bonzini     }
    /* The MRU cache may now point at a stale position in the list. */
12750d6d3c87SPaolo Bonzini     ram_list.mru_block = NULL;
127694a6b54fSpbrook 
1277f798b07fSUmesh Deshpande     ram_list.version++;
1278b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
1279f798b07fSUmesh Deshpande 
    /* Grow the dirty bitmap to cover the new block and mark the whole
     * block dirty. */
12807267c094SAnthony Liguori     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
128104b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
12825fda043fSIgor Mitsyanko     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
12835fda043fSIgor Mitsyanko            0, size >> TARGET_PAGE_BITS);
12841720aeeeSJuan Quintela     cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
128594a6b54fSpbrook 
1286ddb97f1dSJason Baron     qemu_ram_setup_dump(new_block->host, size);
1287ad0b5321SLuiz Capitulino     qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
12883e469dbfSAndrea Arcangeli     qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1289ddb97f1dSJason Baron 
12906f0437e8SJan Kiszka     if (kvm_enabled())
12916f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
12926f0437e8SJan Kiszka 
129394a6b54fSpbrook     return new_block->offset;
129494a6b54fSpbrook }
1295e9a1ab19Sbellard 
1296c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
12976977dfe6SYoshiaki Tamura {
1298c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
12996977dfe6SYoshiaki Tamura }
13006977dfe6SYoshiaki Tamura 
13011f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
13021f2e98b6SAlex Williamson {
13031f2e98b6SAlex Williamson     RAMBlock *block;
13041f2e98b6SAlex Williamson 
1305b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1306b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1307a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
13081f2e98b6SAlex Williamson         if (addr == block->offset) {
1309a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13100d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1311f798b07fSUmesh Deshpande             ram_list.version++;
13127267c094SAnthony Liguori             g_free(block);
1313b2a8658eSUmesh Deshpande             break;
13141f2e98b6SAlex Williamson         }
13151f2e98b6SAlex Williamson     }
1316b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
13171f2e98b6SAlex Williamson }
13181f2e98b6SAlex Williamson 
/*
 * Unregister the RAMBlock at @addr and release its backing memory using
 * whichever mechanism allocated it: nothing for caller-preallocated
 * blocks, the Xen map cache, munmap()+close() for file-backed blocks,
 * or qemu_anon_ram_free() for anonymous memory.
 * Assumes the iothread lock is held.
 */
1319c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
1320e9a1ab19Sbellard {
132104b16653SAlex Williamson     RAMBlock *block;
132204b16653SAlex Williamson 
1323b2a8658eSUmesh Deshpande     /* This assumes the iothread lock is taken here too.  */
1324b2a8658eSUmesh Deshpande     qemu_mutex_lock_ramlist();
1325a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
132604b16653SAlex Williamson         if (addr == block->offset) {
1327a3161038SPaolo Bonzini             QTAILQ_REMOVE(&ram_list.blocks, block, next);
13280d6d3c87SPaolo Bonzini             ram_list.mru_block = NULL;
1329f798b07fSUmesh Deshpande             ram_list.version++;
            /* RAM_PREALLOC_MASK: caller owns the memory; free nothing. */
1330cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1331cd19cfa2SHuang Ying                 ;
1332dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1333dfeaf2abSMarkus Armbruster                 xen_invalidate_map_cache_entry(block->host);
            /* fd >= 0 only ever set on POSIX (file_ram_alloc). */
1334089f3f76SStefan Weil #ifndef _WIN32
13353435f395SMarkus Armbruster             } else if (block->fd >= 0) {
133604b16653SAlex Williamson                 munmap(block->host, block->length);
133704b16653SAlex Williamson                 close(block->fd);
1338089f3f76SStefan Weil #endif
133904b16653SAlex Williamson             } else {
1340e7a09b92SPaolo Bonzini                 qemu_anon_ram_free(block->host, block->length);
134104b16653SAlex Williamson             }
13427267c094SAnthony Liguori             g_free(block);
1343b2a8658eSUmesh Deshpande             break;
134404b16653SAlex Williamson         }
134504b16653SAlex Williamson     }
1346b2a8658eSUmesh Deshpande     qemu_mutex_unlock_ramlist();
134704b16653SAlex Williamson 
1348e9a1ab19Sbellard }
1349e9a1ab19Sbellard 
1350cd19cfa2SHuang Ying #ifndef _WIN32
/*
 * Re-establish the host mapping of the RAM range [addr, addr + length)
 * at the same virtual address, discarding its current contents (used
 * e.g. to recover a poisoned page).  The new mapping is created with
 * MAP_FIXED and must use the same kind of backing as the original
 * allocation (file-backed vs. anonymous).  Exits on mmap failure.
 */
1351cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1352cd19cfa2SHuang Ying {
1353cd19cfa2SHuang Ying     RAMBlock *block;
1354cd19cfa2SHuang Ying     ram_addr_t offset;
1355cd19cfa2SHuang Ying     int flags;
1356cd19cfa2SHuang Ying     void *area, *vaddr;
1357cd19cfa2SHuang Ying 
1358a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1359cd19cfa2SHuang Ying         offset = addr - block->offset;
1360cd19cfa2SHuang Ying         if (offset < block->length) {
1361cd19cfa2SHuang Ying             vaddr = block->host + offset;
            /* Preallocated memory belongs to the caller; Xen memory is
             * managed by the hypervisor — neither can be remapped here. */
1362cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
1363cd19cfa2SHuang Ying                 ;
1364dfeaf2abSMarkus Armbruster             } else if (xen_enabled()) {
1365dfeaf2abSMarkus Armbruster                 abort();
1366cd19cfa2SHuang Ying             } else {
1367cd19cfa2SHuang Ying                 flags = MAP_FIXED;
1368cd19cfa2SHuang Ying                 munmap(vaddr, length);
13693435f395SMarkus Armbruster                 if (block->fd >= 0) {
1370cd19cfa2SHuang Ying #ifdef MAP_POPULATE
1371cd19cfa2SHuang Ying                     flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1372cd19cfa2SHuang Ying                         MAP_PRIVATE;
1373cd19cfa2SHuang Ying #else
1374cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE;
1375cd19cfa2SHuang Ying #endif
1376cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1377cd19cfa2SHuang Ying                                 flags, block->fd, offset);
1378cd19cfa2SHuang Ying                 } else {
13792eb9fbaaSMarkus Armbruster                     /*
13802eb9fbaaSMarkus Armbruster                      * Remap needs to match alloc.  Accelerators that
13812eb9fbaaSMarkus Armbruster                      * set phys_mem_alloc never remap.  If they did,
13822eb9fbaaSMarkus Armbruster                      * we'd need a remap hook here.
13832eb9fbaaSMarkus Armbruster                      */
13842eb9fbaaSMarkus Armbruster                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
13852eb9fbaaSMarkus Armbruster 
1386cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1387cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1388cd19cfa2SHuang Ying                                 flags, -1, 0);
1389cd19cfa2SHuang Ying                 }
                /* MAP_FIXED must land exactly at vaddr or the guest's
                 * view of RAM is broken — treat anything else as fatal. */
1390cd19cfa2SHuang Ying                 if (area != vaddr) {
1391f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
1392f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1393cd19cfa2SHuang Ying                             length, addr);
1394cd19cfa2SHuang Ying                     exit(1);
1395cd19cfa2SHuang Ying                 }
                /* Fresh mapping: re-apply merge/dump policies. */
13968490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
1397ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
1398cd19cfa2SHuang Ying             }
1399cd19cfa2SHuang Ying             return;
1400cd19cfa2SHuang Ying         }
1401cd19cfa2SHuang Ying     }
1402cd19cfa2SHuang Ying }
1403cd19cfa2SHuang Ying #endif /* !_WIN32 */
1404cd19cfa2SHuang Ying 
14051b5ec234SPaolo Bonzini /* Return a host pointer to ram allocated with qemu_ram_alloc.
14061b5ec234SPaolo Bonzini    With the exception of the softmmu code in this file, this should
14071b5ec234SPaolo Bonzini    only be used for local memory (e.g. video ram) that the device owns,
14081b5ec234SPaolo Bonzini    and knows it isn't going to access beyond the end of the block.
14091b5ec234SPaolo Bonzini 
14101b5ec234SPaolo Bonzini    It should not be used for general purpose DMA.
14111b5ec234SPaolo Bonzini    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
14121b5ec234SPaolo Bonzini  */
14131b5ec234SPaolo Bonzini void *qemu_get_ram_ptr(ram_addr_t addr)
14141b5ec234SPaolo Bonzini {
14151b5ec234SPaolo Bonzini     RAMBlock *block = qemu_get_ram_block(addr);
14161b5ec234SPaolo Bonzini 
1417868bb33fSJan Kiszka     if (xen_enabled()) {
1418432d268cSJun Nakajima         /* We need to check if the requested address is in the RAM
1419432d268cSJun Nakajima          * because we don't want to map the entire memory in QEMU.
1420712c2b41SStefano Stabellini          * In that case just map until the end of the page.
1421432d268cSJun Nakajima          */
1422432d268cSJun Nakajima         if (block->offset == 0) {
1423e41d7c69SJan Kiszka             return xen_map_cache(addr, 0, 0);
1424432d268cSJun Nakajima         } else if (block->host == NULL) {
1425e41d7c69SJan Kiszka             block->host =
1426e41d7c69SJan Kiszka                 xen_map_cache(block->offset, block->length, 1);
1427432d268cSJun Nakajima         }
1428432d268cSJun Nakajima     }
1429f471a17eSAlex Williamson     return block->host + (addr - block->offset);
143094a6b54fSpbrook }
1431f471a17eSAlex Williamson 
143238bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
143338bee5dcSStefano Stabellini  * but takes a size argument */
1434cb85f7abSPeter Maydell static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
143538bee5dcSStefano Stabellini {
14368ab934f9SStefano Stabellini     if (*size == 0) {
14378ab934f9SStefano Stabellini         return NULL;
14388ab934f9SStefano Stabellini     }
1439868bb33fSJan Kiszka     if (xen_enabled()) {
1440e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
1441868bb33fSJan Kiszka     } else {
144238bee5dcSStefano Stabellini         RAMBlock *block;
144338bee5dcSStefano Stabellini 
1444a3161038SPaolo Bonzini         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
144538bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
144638bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
144738bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
144838bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
144938bee5dcSStefano Stabellini             }
145038bee5dcSStefano Stabellini         }
145138bee5dcSStefano Stabellini 
145238bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
145338bee5dcSStefano Stabellini         abort();
145438bee5dcSStefano Stabellini     }
145538bee5dcSStefano Stabellini }
145638bee5dcSStefano Stabellini 
14577443b437SPaolo Bonzini /* Some of the softmmu routines need to translate from a host pointer
14587443b437SPaolo Bonzini    (typically a TLB entry) back to a ram offset.  */
14591b5ec234SPaolo Bonzini MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
14605579c7f3Spbrook {
146194a6b54fSpbrook     RAMBlock *block;
146294a6b54fSpbrook     uint8_t *host = ptr;
146394a6b54fSpbrook 
1464868bb33fSJan Kiszka     if (xen_enabled()) {
1465e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
14661b5ec234SPaolo Bonzini         return qemu_get_ram_block(*ram_addr)->mr;
1467712c2b41SStefano Stabellini     }
1468712c2b41SStefano Stabellini 
146923887b79SPaolo Bonzini     block = ram_list.mru_block;
147023887b79SPaolo Bonzini     if (block && block->host && host - block->host < block->length) {
147123887b79SPaolo Bonzini         goto found;
147223887b79SPaolo Bonzini     }
147323887b79SPaolo Bonzini 
1474a3161038SPaolo Bonzini     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1475432d268cSJun Nakajima         /* This case append when the block is not mapped. */
1476432d268cSJun Nakajima         if (block->host == NULL) {
1477432d268cSJun Nakajima             continue;
1478432d268cSJun Nakajima         }
1479f471a17eSAlex Williamson         if (host - block->host < block->length) {
148023887b79SPaolo Bonzini             goto found;
148194a6b54fSpbrook         }
1482f471a17eSAlex Williamson     }
1483432d268cSJun Nakajima 
14841b5ec234SPaolo Bonzini     return NULL;
148523887b79SPaolo Bonzini 
148623887b79SPaolo Bonzini found:
148723887b79SPaolo Bonzini     *ram_addr = block->offset + (host - block->host);
14881b5ec234SPaolo Bonzini     return block->mr;
1489e890261fSMarcelo Tosatti }
1490f471a17eSAlex Williamson 
/*
 * Write handler for RAM pages whose dirty bitmap is not fully set.
 * Invalidates any translated code on the page, performs the actual
 * store, updates the dirty flags, and — once the page is fully dirty —
 * re-enables direct (non-trapping) writes via the current CPU's TLB.
 */
1491a8170e5eSAvi Kivity static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
14920e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
14931ccde1cbSbellard {
14943a7d929eSbellard     int dirty_flags;
1495f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
14963a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Page may hold translated code: throw the TBs away before the
         * store, then re-read the flags that invalidation updated. */
14970e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
1498f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
14993a7d929eSbellard     }
15000e0df1e2SAvi Kivity     switch (size) {
15010e0df1e2SAvi Kivity     case 1:
15025579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
15030e0df1e2SAvi Kivity         break;
15040e0df1e2SAvi Kivity     case 2:
15055579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
15060e0df1e2SAvi Kivity         break;
15070e0df1e2SAvi Kivity     case 4:
15085579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
15090e0df1e2SAvi Kivity         break;
15100e0df1e2SAvi Kivity     default:
15110e0df1e2SAvi Kivity         abort();
15120e0df1e2SAvi Kivity     }
    /* Mark every dirty bit except CODE_DIRTY_FLAG. */
1513f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1514f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1515f23db169Sbellard     /* we remove the notdirty callback only if the code has been
1516f23db169Sbellard        flushed */
15174917cf44SAndreas Färber     if (dirty_flags == 0xff) {
15184917cf44SAndreas Färber         CPUArchState *env = current_cpu->env_ptr;
15194917cf44SAndreas Färber         tlb_set_dirty(env, env->mem_io_vaddr);
15204917cf44SAndreas Färber     }
15211ccde1cbSbellard }
15221ccde1cbSbellard 
1523b018ddf6SPaolo Bonzini static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1524b018ddf6SPaolo Bonzini                                  unsigned size, bool is_write)
1525b018ddf6SPaolo Bonzini {
1526b018ddf6SPaolo Bonzini     return is_write;
1527b018ddf6SPaolo Bonzini }
1528b018ddf6SPaolo Bonzini 
/* Ops for io_mem_notdirty: writes funnel through notdirty_mem_write()
 * so the dirty bitmap stays correct; reads are rejected by
 * notdirty_mem_accepts() (no .read handler is provided). */
15290e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
15300e0df1e2SAvi Kivity     .write = notdirty_mem_write,
1531b018ddf6SPaolo Bonzini     .valid.accepts = notdirty_mem_accepts,
15320e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
15331ccde1cbSbellard };
15341ccde1cbSbellard 
15350f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
/*
 * @offset:   offset of the access within the current page
 * @len_mask: alignment mask derived from the access size (~(size - 1))
 * @flags:    BP_MEM_READ / BP_MEM_WRITE, matched against wp->flags
 *
 * Called from the watch_mem_* handlers on the current CPU.  On a hit,
 * either raises EXCP_DEBUG immediately (BP_STOP_BEFORE_ACCESS) or
 * regenerates the current TB so execution stops after the access.
 */
1536b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
15370f459d16Spbrook {
15384917cf44SAndreas Färber     CPUArchState *env = current_cpu->env_ptr;
153906d55cc1Saliguori     target_ulong pc, cs_base;
15400f459d16Spbrook     target_ulong vaddr;
1541a1d1bb31Saliguori     CPUWatchpoint *wp;
154206d55cc1Saliguori     int cpu_flags;
15430f459d16Spbrook 
154406d55cc1Saliguori     if (env->watchpoint_hit) {
154506d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
154606d55cc1Saliguori          * the debug interrupt so that is will trigger after the
154706d55cc1Saliguori          * current instruction. */
1548c3affe56SAndreas Färber         cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
154906d55cc1Saliguori         return;
155006d55cc1Saliguori     }
15512e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
155272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Range match via alignment masks: either the access covers the
         * watchpoint address, or the watchpoint range covers the access. */
1553b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
1554b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
15556e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
15566e140f28Saliguori             if (!env->watchpoint_hit) {
1557a1d1bb31Saliguori                 env->watchpoint_hit = wp;
15585a316526SBlue Swirl                 tb_check_watchpoint(env);
155906d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
156006d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
1561488d6577SMax Filippov                     cpu_loop_exit(env);
156206d55cc1Saliguori                 } else {
                    /* Stop after the access: regenerate the TB with
                     * single-step semantics and restart it. */
156306d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
156406d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
156506d55cc1Saliguori                     cpu_resume_from_signal(env, NULL);
15660f459d16Spbrook                 }
1567488d6577SMax Filippov             }
15686e140f28Saliguori         } else {
15696e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
15706e140f28Saliguori         }
15710f459d16Spbrook     }
15720f459d16Spbrook }
15730f459d16Spbrook 
15746658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
15756658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
15766658ffb8Spbrook    phys routines.  */
1577a8170e5eSAvi Kivity static uint64_t watch_mem_read(void *opaque, hwaddr addr,
15781ec9b909SAvi Kivity                                unsigned size)
15796658ffb8Spbrook {
15801ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
15811ec9b909SAvi Kivity     switch (size) {
15821ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
15831ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
15841ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
15851ec9b909SAvi Kivity     default: abort();
15861ec9b909SAvi Kivity     }
15876658ffb8Spbrook }
15886658ffb8Spbrook 
1589a8170e5eSAvi Kivity static void watch_mem_write(void *opaque, hwaddr addr,
15901ec9b909SAvi Kivity                             uint64_t val, unsigned size)
15916658ffb8Spbrook {
15921ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
15931ec9b909SAvi Kivity     switch (size) {
159467364150SMax Filippov     case 1:
159567364150SMax Filippov         stb_phys(addr, val);
159667364150SMax Filippov         break;
159767364150SMax Filippov     case 2:
159867364150SMax Filippov         stw_phys(addr, val);
159967364150SMax Filippov         break;
160067364150SMax Filippov     case 4:
160167364150SMax Filippov         stl_phys(addr, val);
160267364150SMax Filippov         break;
16031ec9b909SAvi Kivity     default: abort();
16041ec9b909SAvi Kivity     }
16056658ffb8Spbrook }
16066658ffb8Spbrook 
/* Ops for io_mem_watch: every access first runs the watchpoint check
 * in watch_mem_read()/watch_mem_write() before touching memory. */
16071ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
16081ec9b909SAvi Kivity     .read = watch_mem_read,
16091ec9b909SAvi Kivity     .write = watch_mem_write,
16101ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
16116658ffb8Spbrook };
16126658ffb8Spbrook 
1613a8170e5eSAvi Kivity static uint64_t subpage_read(void *opaque, hwaddr addr,
161470c68e44SAvi Kivity                              unsigned len)
1615db7b5426Sblueswir1 {
1616acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1617acc9d80bSJan Kiszka     uint8_t buf[4];
1618791af8c8SPaolo Bonzini 
1619db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1620016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1621acc9d80bSJan Kiszka            subpage, len, addr);
1622db7b5426Sblueswir1 #endif
1623acc9d80bSJan Kiszka     address_space_read(subpage->as, addr + subpage->base, buf, len);
1624acc9d80bSJan Kiszka     switch (len) {
1625acc9d80bSJan Kiszka     case 1:
1626acc9d80bSJan Kiszka         return ldub_p(buf);
1627acc9d80bSJan Kiszka     case 2:
1628acc9d80bSJan Kiszka         return lduw_p(buf);
1629acc9d80bSJan Kiszka     case 4:
1630acc9d80bSJan Kiszka         return ldl_p(buf);
1631acc9d80bSJan Kiszka     default:
1632acc9d80bSJan Kiszka         abort();
1633acc9d80bSJan Kiszka     }
1634db7b5426Sblueswir1 }
1635db7b5426Sblueswir1 
1636a8170e5eSAvi Kivity static void subpage_write(void *opaque, hwaddr addr,
163770c68e44SAvi Kivity                           uint64_t value, unsigned len)
1638db7b5426Sblueswir1 {
1639acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1640acc9d80bSJan Kiszka     uint8_t buf[4];
1641acc9d80bSJan Kiszka 
1642db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1643016e9d62SAmos Kong     printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1644acc9d80bSJan Kiszka            " value %"PRIx64"\n",
1645acc9d80bSJan Kiszka            __func__, subpage, len, addr, value);
1646db7b5426Sblueswir1 #endif
1647acc9d80bSJan Kiszka     switch (len) {
1648acc9d80bSJan Kiszka     case 1:
1649acc9d80bSJan Kiszka         stb_p(buf, value);
1650acc9d80bSJan Kiszka         break;
1651acc9d80bSJan Kiszka     case 2:
1652acc9d80bSJan Kiszka         stw_p(buf, value);
1653acc9d80bSJan Kiszka         break;
1654acc9d80bSJan Kiszka     case 4:
1655acc9d80bSJan Kiszka         stl_p(buf, value);
1656acc9d80bSJan Kiszka         break;
1657acc9d80bSJan Kiszka     default:
1658acc9d80bSJan Kiszka         abort();
1659acc9d80bSJan Kiszka     }
1660acc9d80bSJan Kiszka     address_space_write(subpage->as, addr + subpage->base, buf, len);
1661db7b5426Sblueswir1 }
1662db7b5426Sblueswir1 
1663c353e4ccSPaolo Bonzini static bool subpage_accepts(void *opaque, hwaddr addr,
1664016e9d62SAmos Kong                             unsigned len, bool is_write)
1665c353e4ccSPaolo Bonzini {
1666acc9d80bSJan Kiszka     subpage_t *subpage = opaque;
1667c353e4ccSPaolo Bonzini #if defined(DEBUG_SUBPAGE)
1668016e9d62SAmos Kong     printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1669acc9d80bSJan Kiszka            __func__, subpage, is_write ? 'w' : 'r', len, addr);
1670c353e4ccSPaolo Bonzini #endif
1671c353e4ccSPaolo Bonzini 
1672acc9d80bSJan Kiszka     return address_space_access_valid(subpage->as, addr + subpage->base,
1673016e9d62SAmos Kong                                       len, is_write);
1674c353e4ccSPaolo Bonzini }
1675c353e4ccSPaolo Bonzini 
/* Ops for subpage regions: all accesses bounce through the owning
 * subpage_t and are re-dispatched into its address space. */
167670c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
167770c68e44SAvi Kivity     .read = subpage_read,
167870c68e44SAvi Kivity     .write = subpage_write,
1679c353e4ccSPaolo Bonzini     .valid.accepts = subpage_accepts,
168070c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
1681db7b5426Sblueswir1 };
1682db7b5426Sblueswir1 
1683c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
16845312bd8bSAvi Kivity                              uint16_t section)
1685db7b5426Sblueswir1 {
1686db7b5426Sblueswir1     int idx, eidx;
1687db7b5426Sblueswir1 
1688db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1689db7b5426Sblueswir1         return -1;
1690db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
1691db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
1692db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1693016e9d62SAmos Kong     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1694016e9d62SAmos Kong            __func__, mmio, start, end, idx, eidx, section);
1695db7b5426Sblueswir1 #endif
1696db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
16975312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
1698db7b5426Sblueswir1     }
1699db7b5426Sblueswir1 
1700db7b5426Sblueswir1     return 0;
1701db7b5426Sblueswir1 }
1702db7b5426Sblueswir1 
1703acc9d80bSJan Kiszka static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1704db7b5426Sblueswir1 {
1705c227f099SAnthony Liguori     subpage_t *mmio;
1706db7b5426Sblueswir1 
17077267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
17081eec614bSaliguori 
1709acc9d80bSJan Kiszka     mmio->as = as;
1710db7b5426Sblueswir1     mmio->base = base;
17112c9b15caSPaolo Bonzini     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
171270c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
1713b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
1714db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
1715016e9d62SAmos Kong     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1716016e9d62SAmos Kong            mmio, base, TARGET_PAGE_SIZE);
1717db7b5426Sblueswir1 #endif
1718b41aac4fSLiu Ping Fan     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1719db7b5426Sblueswir1 
1720db7b5426Sblueswir1     return mmio;
1721db7b5426Sblueswir1 }
1722db7b5426Sblueswir1 
17235312bd8bSAvi Kivity static uint16_t dummy_section(MemoryRegion *mr)
17245312bd8bSAvi Kivity {
17255312bd8bSAvi Kivity     MemoryRegionSection section = {
17265312bd8bSAvi Kivity         .mr = mr,
17275312bd8bSAvi Kivity         .offset_within_address_space = 0,
17285312bd8bSAvi Kivity         .offset_within_region = 0,
1729052e87b0SPaolo Bonzini         .size = int128_2_64(),
17305312bd8bSAvi Kivity     };
17315312bd8bSAvi Kivity 
17325312bd8bSAvi Kivity     return phys_section_add(&section);
17335312bd8bSAvi Kivity }
17345312bd8bSAvi Kivity 
1735a8170e5eSAvi Kivity MemoryRegion *iotlb_to_region(hwaddr index)
1736aa102231SAvi Kivity {
17370475d94fSPaolo Bonzini     return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
1738aa102231SAvi Kivity }
1739aa102231SAvi Kivity 
1740e9179ce1SAvi Kivity static void io_mem_init(void)
1741e9179ce1SAvi Kivity {
17422c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
17432c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
17440e0df1e2SAvi Kivity                           "unassigned", UINT64_MAX);
17452c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
17460e0df1e2SAvi Kivity                           "notdirty", UINT64_MAX);
17472c9b15caSPaolo Bonzini     memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
17481ec9b909SAvi Kivity                           "watch", UINT64_MAX);
1749e9179ce1SAvi Kivity }
1750e9179ce1SAvi Kivity 
1751ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
1752ac1970fbSAvi Kivity {
175389ae337aSPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
175400752703SPaolo Bonzini     AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
175500752703SPaolo Bonzini 
17569736e55bSMichael S. Tsirkin     d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
175700752703SPaolo Bonzini     d->as = as;
175800752703SPaolo Bonzini     as->next_dispatch = d;
175900752703SPaolo Bonzini }
176000752703SPaolo Bonzini 
176100752703SPaolo Bonzini static void mem_commit(MemoryListener *listener)
176200752703SPaolo Bonzini {
176300752703SPaolo Bonzini     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
17640475d94fSPaolo Bonzini     AddressSpaceDispatch *cur = as->dispatch;
17650475d94fSPaolo Bonzini     AddressSpaceDispatch *next = as->next_dispatch;
1766ac1970fbSAvi Kivity 
17670475d94fSPaolo Bonzini     next->nodes = next_map.nodes;
17680475d94fSPaolo Bonzini     next->sections = next_map.sections;
17690475d94fSPaolo Bonzini 
1770b35ba30fSMichael S. Tsirkin     phys_page_compact_all(next, next_map.nodes_nb);
1771b35ba30fSMichael S. Tsirkin 
17720475d94fSPaolo Bonzini     as->dispatch = next;
17730475d94fSPaolo Bonzini     g_free(cur);
1774ac1970fbSAvi Kivity }
1775ac1970fbSAvi Kivity 
/*
 * Core listener "begin" hook: stash the current section/node map in
 * prev_map (freed by core_commit() once every AddressSpaceDispatch has
 * switched over) and start a fresh one.  The dummy sections MUST be
 * added in this exact order so their indices match the PHYS_SECTION_*
 * constants — the asserts enforce this.
 */
177650c1e149SAvi Kivity static void core_begin(MemoryListener *listener)
177750c1e149SAvi Kivity {
1778b41aac4fSLiu Ping Fan     uint16_t n;
1779b41aac4fSLiu Ping Fan 
17806092666eSPaolo Bonzini     prev_map = g_new(PhysPageMap, 1);
17816092666eSPaolo Bonzini     *prev_map = next_map;
17826092666eSPaolo Bonzini 
17839affd6fcSPaolo Bonzini     memset(&next_map, 0, sizeof(next_map));
1784b41aac4fSLiu Ping Fan     n = dummy_section(&io_mem_unassigned);
1785b41aac4fSLiu Ping Fan     assert(n == PHYS_SECTION_UNASSIGNED);
1786b41aac4fSLiu Ping Fan     n = dummy_section(&io_mem_notdirty);
1787b41aac4fSLiu Ping Fan     assert(n == PHYS_SECTION_NOTDIRTY);
1788b41aac4fSLiu Ping Fan     n = dummy_section(&io_mem_rom);
1789b41aac4fSLiu Ping Fan     assert(n == PHYS_SECTION_ROM);
1790b41aac4fSLiu Ping Fan     n = dummy_section(&io_mem_watch);
1791b41aac4fSLiu Ping Fan     assert(n == PHYS_SECTION_WATCH);
179250c1e149SAvi Kivity }
179350c1e149SAvi Kivity 
17949affd6fcSPaolo Bonzini /* This listener's commit run after the other AddressSpaceDispatch listeners'.
17959affd6fcSPaolo Bonzini  * All AddressSpaceDispatch instances have switched to the next map.
17969affd6fcSPaolo Bonzini  */
static void core_commit(MemoryListener *listener)
{
    /* Every dispatch now points at the new map; the old one saved by
     * core_begin() can no longer be referenced, so free it.
     */
    phys_sections_free(prev_map);
}
18019affd6fcSPaolo Bonzini 
/* Memory topology changed: flush every CPU's TLB, since the ram
 * addresses cached there may have become stale.
 */
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}
181550c1e149SAvi Kivity 
/* Some listener requested global dirty logging: turn tracking on.  */
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}
182093632747SAvi Kivity 
/* Global dirty logging is no longer needed: turn tracking off.  */
static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
182593632747SAvi Kivity 
/* Core listener that rebuilds the flat section map.  Priority 1 orders
 * it relative to the priority-0 AddressSpaceDispatch listeners so that
 * its commit runs after all dispatches have switched maps.
 */
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};
183393632747SAvi Kivity 
/* TCG only cares that a topology update committed (to flush TLBs).  */
static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
18371d71148eSAvi Kivity 
/* Attach a dispatch listener to @as so that its AddressSpaceDispatch
 * is (re)built on every memory topology change.
 */
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
1850ac1970fbSAvi Kivity 
185183f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
185283f3c251SAvi Kivity {
185383f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
185483f3c251SAvi Kivity 
185589ae337aSPaolo Bonzini     memory_listener_unregister(&as->dispatch_listener);
185683f3c251SAvi Kivity     g_free(d);
185783f3c251SAvi Kivity     as->dispatch = NULL;
185883f3c251SAvi Kivity }
185983f3c251SAvi Kivity 
/* Create the global "memory" and "I/O" address spaces with their root
 * regions, and register the core (and, under TCG, the TLB-flush)
 * listeners.
 */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    /* The system memory root spans the whole 64-bit address range.  */
    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    /* 64K legacy I/O port space, backed by unassigned-access ops.  */
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}
187762152b8aSAvi Kivity 
/* Accessor for the root region of the system RAM address space.  */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
188262152b8aSAvi Kivity 
/* Accessor for the root region of the system I/O address space.  */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
1887309cb471SAvi Kivity 
1888e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
1889e2eef170Spbrook 
189013eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
189113eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
1892f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1893a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
189413eb76e0Sbellard {
189513eb76e0Sbellard     int l, flags;
189613eb76e0Sbellard     target_ulong page;
189753a5960aSpbrook     void * p;
189813eb76e0Sbellard 
189913eb76e0Sbellard     while (len > 0) {
190013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
190113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
190213eb76e0Sbellard         if (l > len)
190313eb76e0Sbellard             l = len;
190413eb76e0Sbellard         flags = page_get_flags(page);
190513eb76e0Sbellard         if (!(flags & PAGE_VALID))
1906a68fe89cSPaul Brook             return -1;
190713eb76e0Sbellard         if (is_write) {
190813eb76e0Sbellard             if (!(flags & PAGE_WRITE))
1909a68fe89cSPaul Brook                 return -1;
1910579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
191172fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1912a68fe89cSPaul Brook                 return -1;
191372fb7daaSaurel32             memcpy(p, buf, l);
191472fb7daaSaurel32             unlock_user(p, addr, l);
191513eb76e0Sbellard         } else {
191613eb76e0Sbellard             if (!(flags & PAGE_READ))
1917a68fe89cSPaul Brook                 return -1;
1918579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
191972fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1920a68fe89cSPaul Brook                 return -1;
192172fb7daaSaurel32             memcpy(buf, p, l);
19225b257578Saurel32             unlock_user(p, addr, 0);
192313eb76e0Sbellard         }
192413eb76e0Sbellard         len -= l;
192513eb76e0Sbellard         buf += l;
192613eb76e0Sbellard         addr += l;
192713eb76e0Sbellard     }
1928a68fe89cSPaul Brook     return 0;
192913eb76e0Sbellard }
19308df1cd07Sbellard 
193113eb76e0Sbellard #else
193251d7a9ebSAnthony PERARD 
/* After a direct write of @length bytes at ram address @addr:
 * invalidate translated code in the range and mark it dirty
 * (all flags except CODE_DIRTY_FLAG).
 */
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    /* Xen keeps its own record of modified guest memory.  */
    xen_modified_memory(addr, length);
}
194451d7a9ebSAnthony PERARD 
19452bbfa05dSPaolo Bonzini static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
19462bbfa05dSPaolo Bonzini {
19472bbfa05dSPaolo Bonzini     if (memory_region_is_ram(mr)) {
19482bbfa05dSPaolo Bonzini         return !(is_write && mr->readonly);
19492bbfa05dSPaolo Bonzini     }
19502bbfa05dSPaolo Bonzini     if (memory_region_is_romd(mr)) {
19512bbfa05dSPaolo Bonzini         return !is_write;
19522bbfa05dSPaolo Bonzini     }
19532bbfa05dSPaolo Bonzini 
19542bbfa05dSPaolo Bonzini     return false;
19552bbfa05dSPaolo Bonzini }
19562bbfa05dSPaolo Bonzini 
/* Clamp a desired access size @l for region @mr at offset @addr:
 * limit it to the region's declared maximum (default 4 bytes), to the
 * natural alignment of @addr if unaligned accesses are unsupported,
 * and finally round down to a power of two.
 */
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        /* addr & -addr isolates the lowest set bit == alignment.  */
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    /* Round down to the largest power of two not exceeding l.  */
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
198582f2563fSPaolo Bonzini 
1986fd8aaa76SPaolo Bonzini bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1987ac1970fbSAvi Kivity                       int len, bool is_write)
198813eb76e0Sbellard {
1989149f54b5SPaolo Bonzini     hwaddr l;
199013eb76e0Sbellard     uint8_t *ptr;
1991791af8c8SPaolo Bonzini     uint64_t val;
1992149f54b5SPaolo Bonzini     hwaddr addr1;
19935c8a00ceSPaolo Bonzini     MemoryRegion *mr;
1994fd8aaa76SPaolo Bonzini     bool error = false;
199513eb76e0Sbellard 
199613eb76e0Sbellard     while (len > 0) {
199713eb76e0Sbellard         l = len;
19985c8a00ceSPaolo Bonzini         mr = address_space_translate(as, addr, &addr1, &l, is_write);
199913eb76e0Sbellard 
200013eb76e0Sbellard         if (is_write) {
20015c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
20025c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
20034917cf44SAndreas Färber                 /* XXX: could force current_cpu to NULL to avoid
20046a00d601Sbellard                    potential bugs */
200523326164SRichard Henderson                 switch (l) {
200623326164SRichard Henderson                 case 8:
200723326164SRichard Henderson                     /* 64 bit write access */
200823326164SRichard Henderson                     val = ldq_p(buf);
200923326164SRichard Henderson                     error |= io_mem_write(mr, addr1, val, 8);
201023326164SRichard Henderson                     break;
201123326164SRichard Henderson                 case 4:
20121c213d19Sbellard                     /* 32 bit write access */
2013c27004ecSbellard                     val = ldl_p(buf);
20145c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 4);
201523326164SRichard Henderson                     break;
201623326164SRichard Henderson                 case 2:
20171c213d19Sbellard                     /* 16 bit write access */
2018c27004ecSbellard                     val = lduw_p(buf);
20195c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 2);
202023326164SRichard Henderson                     break;
202123326164SRichard Henderson                 case 1:
20221c213d19Sbellard                     /* 8 bit write access */
2023c27004ecSbellard                     val = ldub_p(buf);
20245c8a00ceSPaolo Bonzini                     error |= io_mem_write(mr, addr1, val, 1);
202523326164SRichard Henderson                     break;
202623326164SRichard Henderson                 default:
202723326164SRichard Henderson                     abort();
202813eb76e0Sbellard                 }
20292bbfa05dSPaolo Bonzini             } else {
20305c8a00ceSPaolo Bonzini                 addr1 += memory_region_get_ram_addr(mr);
203113eb76e0Sbellard                 /* RAM case */
20325579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
203313eb76e0Sbellard                 memcpy(ptr, buf, l);
203451d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
20353a7d929eSbellard             }
203613eb76e0Sbellard         } else {
20375c8a00ceSPaolo Bonzini             if (!memory_access_is_direct(mr, is_write)) {
203813eb76e0Sbellard                 /* I/O case */
20395c8a00ceSPaolo Bonzini                 l = memory_access_size(mr, l, addr1);
204023326164SRichard Henderson                 switch (l) {
204123326164SRichard Henderson                 case 8:
204223326164SRichard Henderson                     /* 64 bit read access */
204323326164SRichard Henderson                     error |= io_mem_read(mr, addr1, &val, 8);
204423326164SRichard Henderson                     stq_p(buf, val);
204523326164SRichard Henderson                     break;
204623326164SRichard Henderson                 case 4:
204713eb76e0Sbellard                     /* 32 bit read access */
20485c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 4);
2049c27004ecSbellard                     stl_p(buf, val);
205023326164SRichard Henderson                     break;
205123326164SRichard Henderson                 case 2:
205213eb76e0Sbellard                     /* 16 bit read access */
20535c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 2);
2054c27004ecSbellard                     stw_p(buf, val);
205523326164SRichard Henderson                     break;
205623326164SRichard Henderson                 case 1:
20571c213d19Sbellard                     /* 8 bit read access */
20585c8a00ceSPaolo Bonzini                     error |= io_mem_read(mr, addr1, &val, 1);
2059c27004ecSbellard                     stb_p(buf, val);
206023326164SRichard Henderson                     break;
206123326164SRichard Henderson                 default:
206223326164SRichard Henderson                     abort();
206313eb76e0Sbellard                 }
206413eb76e0Sbellard             } else {
206513eb76e0Sbellard                 /* RAM case */
20665c8a00ceSPaolo Bonzini                 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2067f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
206813eb76e0Sbellard             }
206913eb76e0Sbellard         }
207013eb76e0Sbellard         len -= l;
207113eb76e0Sbellard         buf += l;
207213eb76e0Sbellard         addr += l;
207313eb76e0Sbellard     }
2074fd8aaa76SPaolo Bonzini 
2075fd8aaa76SPaolo Bonzini     return error;
207613eb76e0Sbellard }
20778df1cd07Sbellard 
/* Write @len bytes from @buf to @addr in @as; returns true on error.  */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}
2083ac1970fbSAvi Kivity 
/* Read @len bytes from @addr in @as into @buf; returns true on error.  */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}
2088ac1970fbSAvi Kivity 
2089ac1970fbSAvi Kivity 
/* Access guest physical memory through the global "memory" address
 * space; per-access I/O errors are ignored.
 */
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
2095ac1970fbSAvi Kivity 
/* used for ROM loading : can write in RAM and ROM.
 * Writes that do not land in RAM or a ROM-device region are silently
 * dropped.
 */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2125d0ecd2aaSbellard 
/* Fallback buffer used by address_space_map() when the target is not
 * directly accessible RAM.  There is a single global instance; it is
 * in use while bounce.buffer != NULL.
 */
typedef struct {
    MemoryRegion *mr;   /* region referenced while the mapping lives */
    void *buffer;       /* host memory backing the mapping */
    hwaddr addr;        /* guest address the buffer shadows */
    hwaddr len;         /* length of the mapped range */
} BounceBuffer;

static BounceBuffer bounce;
21346d16c2f8Saliguori 
/* A client waiting to be notified when the bounce buffer is released
 * so it can retry a failed address_space_map().
 */
typedef struct MapClient {
    void *opaque;                   /* passed back to the callback */
    void (*callback)(void *opaque); /* invoked on buffer release */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
2143ba223c29Saliguori 
/* Register @callback (with @opaque) to run when the bounce buffer is
 * next released.  Returns an opaque handle for the registration.
 */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
2153ba223c29Saliguori 
/* Remove and free a client created by cpu_register_map_client().  */
static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}
2161ba223c29Saliguori 
/* Invoke and unregister every waiting map client; called when the
 * bounce buffer becomes free.
 */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
2172ba223c29Saliguori 
/* Return true if a @len-byte access at @addr in @as would be accepted.
 * Directly accessible regions always are; MMIO regions are checked
 * chunk by chunk against their access-size constraints.
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
219351644ab7SPaolo Bonzini 
21946d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
21956d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
21966d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
21976d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
2198ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
2199ba223c29Saliguori  * likely to succeed.
22006d16c2f8Saliguori  */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        /* Not direct RAM: fall back to the single global bounce buffer.
         * If it is already in use the caller must retry later (see
         * cpu_register_map_client()).
         */
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        /* Keep the region alive until address_space_unmap().  */
        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Pre-fill the buffer for a read mapping.  */
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    /* Grow the mapping as long as successive translations stay in the
     * same region and remain physically contiguous.
     */
    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
22606d16c2f8Saliguori 
2261ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
22626d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
22636d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
22646d16c2f8Saliguori  */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping.  */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            /* Invalidate TBs and set dirty bits, one page at a time.  */
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    /* Bounce buffer: write back the data through the slow path.  */
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    /* Wake anyone waiting for the bounce buffer to become free.  */
    cpu_notify_map_clients();
}
2299d0ecd2aaSbellard 
/* Map guest physical memory from the global "memory" address space.  */
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
2306ac1970fbSAvi Kivity 
/* Release a mapping obtained with cpu_physical_memory_map().  */
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
2312ac1970fbSAvi Kivity 
/* warning: addr must be aligned.
 * Load a 32-bit value from guest physical memory with the requested
 * endianness, via MMIO callbacks or directly from RAM.
 */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
        /* io_mem_read returns target-native order; swap if the caller
         * asked for the opposite endianness.
         */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
23568df1cd07Sbellard 
/* Load a 32-bit value in target-native byte order.  */
uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 32-bit little-endian value.  */
uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 32-bit big-endian value.  */
uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
23711e78bcc1SAlexander Graf 
/* warning: addr must be aligned.
 * Load a 64-bit value from guest physical memory with the requested
 * endianness, via MMIO callbacks or directly from RAM.
 */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
        /* Swap if the caller asked for non-target-native order.  */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
241584b7b8e7Sbellard 
/* Read a 64-bit target-native-endian value from guest physical memory. */
uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}
24201e78bcc1SAlexander Graf 
/* Read a 64-bit little-endian value from guest physical memory. */
uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}
24251e78bcc1SAlexander Graf 
/* Read a 64-bit big-endian value from guest physical memory. */
uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
24301e78bcc1SAlexander Graf 
2431aab33094Sbellard /* XXX: optimize */
2432a8170e5eSAvi Kivity uint32_t ldub_phys(hwaddr addr)
2433aab33094Sbellard {
2434aab33094Sbellard     uint8_t val;
2435aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
2436aab33094Sbellard     return val;
2437aab33094Sbellard }
2438aab33094Sbellard 
2439733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2440a8170e5eSAvi Kivity static inline uint32_t lduw_phys_internal(hwaddr addr,
24411e78bcc1SAlexander Graf                                           enum device_endian endian)
2442aab33094Sbellard {
2443733f0b02SMichael S. Tsirkin     uint8_t *ptr;
2444733f0b02SMichael S. Tsirkin     uint64_t val;
24455c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2446149f54b5SPaolo Bonzini     hwaddr l = 2;
2447149f54b5SPaolo Bonzini     hwaddr addr1;
2448733f0b02SMichael S. Tsirkin 
24495c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2450149f54b5SPaolo Bonzini                                  false);
24515c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, false)) {
2452733f0b02SMichael S. Tsirkin         /* I/O case */
24535c8a00ceSPaolo Bonzini         io_mem_read(mr, addr1, &val, 2);
24541e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
24551e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
24561e78bcc1SAlexander Graf             val = bswap16(val);
24571e78bcc1SAlexander Graf         }
24581e78bcc1SAlexander Graf #else
24591e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
24601e78bcc1SAlexander Graf             val = bswap16(val);
24611e78bcc1SAlexander Graf         }
24621e78bcc1SAlexander Graf #endif
2463733f0b02SMichael S. Tsirkin     } else {
2464733f0b02SMichael S. Tsirkin         /* RAM case */
24655c8a00ceSPaolo Bonzini         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
246606ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
2467149f54b5SPaolo Bonzini                                + addr1);
24681e78bcc1SAlexander Graf         switch (endian) {
24691e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
24701e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
24711e78bcc1SAlexander Graf             break;
24721e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
24731e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
24741e78bcc1SAlexander Graf             break;
24751e78bcc1SAlexander Graf         default:
2476733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
24771e78bcc1SAlexander Graf             break;
24781e78bcc1SAlexander Graf         }
2479733f0b02SMichael S. Tsirkin     }
2480733f0b02SMichael S. Tsirkin     return val;
2481aab33094Sbellard }
2482aab33094Sbellard 
/* Read a 16-bit target-native-endian value from guest physical memory. */
uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}
24871e78bcc1SAlexander Graf 
/* Read a 16-bit little-endian value from guest physical memory. */
uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}
24921e78bcc1SAlexander Graf 
/* Read a 16-bit big-endian value from guest physical memory. */
uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
24971e78bcc1SAlexander Graf 
24988df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
24998df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
25008df1cd07Sbellard    bits are used to track modified PTEs */
2501a8170e5eSAvi Kivity void stl_phys_notdirty(hwaddr addr, uint32_t val)
25028df1cd07Sbellard {
25038df1cd07Sbellard     uint8_t *ptr;
25045c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2505149f54b5SPaolo Bonzini     hwaddr l = 4;
2506149f54b5SPaolo Bonzini     hwaddr addr1;
25078df1cd07Sbellard 
25085c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2509149f54b5SPaolo Bonzini                                  true);
25105c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
25115c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
25128df1cd07Sbellard     } else {
25135c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
25145579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
25158df1cd07Sbellard         stl_p(ptr, val);
251674576198Saliguori 
251774576198Saliguori         if (unlikely(in_migration)) {
251874576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
251974576198Saliguori                 /* invalidate code */
252074576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
252174576198Saliguori                 /* set dirty bit */
2522f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
2523f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
252474576198Saliguori             }
252574576198Saliguori         }
25268df1cd07Sbellard     }
25278df1cd07Sbellard }
25288df1cd07Sbellard 
25298df1cd07Sbellard /* warning: addr must be aligned */
2530a8170e5eSAvi Kivity static inline void stl_phys_internal(hwaddr addr, uint32_t val,
25311e78bcc1SAlexander Graf                                      enum device_endian endian)
25328df1cd07Sbellard {
25338df1cd07Sbellard     uint8_t *ptr;
25345c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2535149f54b5SPaolo Bonzini     hwaddr l = 4;
2536149f54b5SPaolo Bonzini     hwaddr addr1;
25378df1cd07Sbellard 
25385c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2539149f54b5SPaolo Bonzini                                  true);
25405c8a00ceSPaolo Bonzini     if (l < 4 || !memory_access_is_direct(mr, true)) {
25411e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
25421e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
25431e78bcc1SAlexander Graf             val = bswap32(val);
25441e78bcc1SAlexander Graf         }
25451e78bcc1SAlexander Graf #else
25461e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
25471e78bcc1SAlexander Graf             val = bswap32(val);
25481e78bcc1SAlexander Graf         }
25491e78bcc1SAlexander Graf #endif
25505c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 4);
25518df1cd07Sbellard     } else {
25528df1cd07Sbellard         /* RAM case */
25535c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
25545579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
25551e78bcc1SAlexander Graf         switch (endian) {
25561e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
25571e78bcc1SAlexander Graf             stl_le_p(ptr, val);
25581e78bcc1SAlexander Graf             break;
25591e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
25601e78bcc1SAlexander Graf             stl_be_p(ptr, val);
25611e78bcc1SAlexander Graf             break;
25621e78bcc1SAlexander Graf         default:
25638df1cd07Sbellard             stl_p(ptr, val);
25641e78bcc1SAlexander Graf             break;
25651e78bcc1SAlexander Graf         }
256651d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
25678df1cd07Sbellard     }
25683a7d929eSbellard }
25698df1cd07Sbellard 
/* Store a 32-bit target-native-endian value to guest physical memory. */
void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}
25741e78bcc1SAlexander Graf 
/* Store a 32-bit little-endian value to guest physical memory. */
void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}
25791e78bcc1SAlexander Graf 
/* Store a 32-bit big-endian value to guest physical memory. */
void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
25841e78bcc1SAlexander Graf 
2585aab33094Sbellard /* XXX: optimize */
2586a8170e5eSAvi Kivity void stb_phys(hwaddr addr, uint32_t val)
2587aab33094Sbellard {
2588aab33094Sbellard     uint8_t v = val;
2589aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
2590aab33094Sbellard }
2591aab33094Sbellard 
2592733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
2593a8170e5eSAvi Kivity static inline void stw_phys_internal(hwaddr addr, uint32_t val,
25941e78bcc1SAlexander Graf                                      enum device_endian endian)
2595aab33094Sbellard {
2596733f0b02SMichael S. Tsirkin     uint8_t *ptr;
25975c8a00ceSPaolo Bonzini     MemoryRegion *mr;
2598149f54b5SPaolo Bonzini     hwaddr l = 2;
2599149f54b5SPaolo Bonzini     hwaddr addr1;
2600733f0b02SMichael S. Tsirkin 
26015c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2602149f54b5SPaolo Bonzini                                  true);
26035c8a00ceSPaolo Bonzini     if (l < 2 || !memory_access_is_direct(mr, true)) {
26041e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
26051e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
26061e78bcc1SAlexander Graf             val = bswap16(val);
26071e78bcc1SAlexander Graf         }
26081e78bcc1SAlexander Graf #else
26091e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
26101e78bcc1SAlexander Graf             val = bswap16(val);
26111e78bcc1SAlexander Graf         }
26121e78bcc1SAlexander Graf #endif
26135c8a00ceSPaolo Bonzini         io_mem_write(mr, addr1, val, 2);
2614733f0b02SMichael S. Tsirkin     } else {
2615733f0b02SMichael S. Tsirkin         /* RAM case */
26165c8a00ceSPaolo Bonzini         addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2617733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
26181e78bcc1SAlexander Graf         switch (endian) {
26191e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
26201e78bcc1SAlexander Graf             stw_le_p(ptr, val);
26211e78bcc1SAlexander Graf             break;
26221e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
26231e78bcc1SAlexander Graf             stw_be_p(ptr, val);
26241e78bcc1SAlexander Graf             break;
26251e78bcc1SAlexander Graf         default:
2626733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
26271e78bcc1SAlexander Graf             break;
26281e78bcc1SAlexander Graf         }
262951d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
2630733f0b02SMichael S. Tsirkin     }
2631aab33094Sbellard }
2632aab33094Sbellard 
/* Store a 16-bit target-native-endian value to guest physical memory. */
void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}
26371e78bcc1SAlexander Graf 
/* Store a 16-bit little-endian value to guest physical memory. */
void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}
26421e78bcc1SAlexander Graf 
/* Store a 16-bit big-endian value to guest physical memory. */
void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
26471e78bcc1SAlexander Graf 
2648aab33094Sbellard /* XXX: optimize */
2649a8170e5eSAvi Kivity void stq_phys(hwaddr addr, uint64_t val)
2650aab33094Sbellard {
2651aab33094Sbellard     val = tswap64(val);
265271d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
2653aab33094Sbellard }
2654aab33094Sbellard 
2655a8170e5eSAvi Kivity void stq_le_phys(hwaddr addr, uint64_t val)
26561e78bcc1SAlexander Graf {
26571e78bcc1SAlexander Graf     val = cpu_to_le64(val);
26581e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
26591e78bcc1SAlexander Graf }
26601e78bcc1SAlexander Graf 
2661a8170e5eSAvi Kivity void stq_be_phys(hwaddr addr, uint64_t val)
26621e78bcc1SAlexander Graf {
26631e78bcc1SAlexander Graf     val = cpu_to_be64(val);
26641e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
26651e78bcc1SAlexander Graf }
26661e78bcc1SAlexander Graf 
26675e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
2668f17ec444SAndreas Färber int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2669b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
267013eb76e0Sbellard {
267113eb76e0Sbellard     int l;
2672a8170e5eSAvi Kivity     hwaddr phys_addr;
26739b3c35e0Sj_mayer     target_ulong page;
267413eb76e0Sbellard 
267513eb76e0Sbellard     while (len > 0) {
267613eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
2677f17ec444SAndreas Färber         phys_addr = cpu_get_phys_page_debug(cpu, page);
267813eb76e0Sbellard         /* if no physical page mapped, return an error */
267913eb76e0Sbellard         if (phys_addr == -1)
268013eb76e0Sbellard             return -1;
268113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
268213eb76e0Sbellard         if (l > len)
268313eb76e0Sbellard             l = len;
26845e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
26855e2972fdSaliguori         if (is_write)
26865e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
26875e2972fdSaliguori         else
26885e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
268913eb76e0Sbellard         len -= l;
269013eb76e0Sbellard         buf += l;
269113eb76e0Sbellard         addr += l;
269213eb76e0Sbellard     }
269313eb76e0Sbellard     return 0;
269413eb76e0Sbellard }
2695a68fe89cSPaul Brook #endif
269613eb76e0Sbellard 
26978e4a424bSBlue Swirl #if !defined(CONFIG_USER_ONLY)
26988e4a424bSBlue Swirl 
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
    /* Decided entirely at compile time by the target's byte order. */
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}
27128e4a424bSBlue Swirl 
27138e4a424bSBlue Swirl #endif
27148e4a424bSBlue Swirl 
271576f35538SWen Congyang #ifndef CONFIG_USER_ONLY
2716a8170e5eSAvi Kivity bool cpu_physical_memory_is_io(hwaddr phys_addr)
271776f35538SWen Congyang {
27185c8a00ceSPaolo Bonzini     MemoryRegion*mr;
2719149f54b5SPaolo Bonzini     hwaddr l = 1;
272076f35538SWen Congyang 
27215c8a00ceSPaolo Bonzini     mr = address_space_translate(&address_space_memory,
2722149f54b5SPaolo Bonzini                                  phys_addr, &phys_addr, &l, false);
272376f35538SWen Congyang 
27245c8a00ceSPaolo Bonzini     return !(memory_region_is_ram(mr) ||
27255c8a00ceSPaolo Bonzini              memory_region_is_romd(mr));
272676f35538SWen Congyang }
2727bd2fa51fSMichael R. Hines 
2728bd2fa51fSMichael R. Hines void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2729bd2fa51fSMichael R. Hines {
2730bd2fa51fSMichael R. Hines     RAMBlock *block;
2731bd2fa51fSMichael R. Hines 
2732bd2fa51fSMichael R. Hines     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2733bd2fa51fSMichael R. Hines         func(block->host, block->offset, block->length, opaque);
2734bd2fa51fSMichael R. Hines     }
2735bd2fa51fSMichael R. Hines }
2736ec3f8c99SPeter Maydell #endif
2737