xref: /qemu/system/physmem.c (revision 5cd2c5b6ad75c46d40118ac67c0c09d4e7930a65)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard #include <stdlib.h>
2754936004Sbellard #include <stdio.h>
2854936004Sbellard #include <stdarg.h>
2954936004Sbellard #include <string.h>
3054936004Sbellard #include <errno.h>
3154936004Sbellard #include <unistd.h>
3254936004Sbellard #include <inttypes.h>
3354936004Sbellard 
346180a181Sbellard #include "cpu.h"
356180a181Sbellard #include "exec-all.h"
36ca10f867Saurel32 #include "qemu-common.h"
37b67d9a52Sbellard #include "tcg.h"
38b3c7724cSpbrook #include "hw/hw.h"
3974576198Saliguori #include "osdep.h"
407ba1e619Saliguori #include "kvm.h"
4153a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4253a5960aSpbrook #include <qemu.h>
43fd052bf6SRiku Voipio #include <signal.h>
4453a5960aSpbrook #endif
4554936004Sbellard 
46fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4766e85a21Sbellard //#define DEBUG_FLUSH
489fa3e853Sbellard //#define DEBUG_TLB
4967d3b957Spbrook //#define DEBUG_UNASSIGNED
50fd6ce8f6Sbellard 
51fd6ce8f6Sbellard /* make various TB consistency checks */
52fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5398857888Sbellard //#define DEBUG_TLB_CHECK
54fd6ce8f6Sbellard 
551196be37Sths //#define DEBUG_IOPORT
56db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
571196be37Sths 
5899773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5999773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
6099773bd4Spbrook #undef DEBUG_TB_CHECK
6199773bd4Spbrook #endif
6299773bd4Spbrook 
639fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
649fa3e853Sbellard 
65bdaf78e0Sblueswir1 static TranslationBlock *tbs;
6626a5f13bSbellard int code_gen_max_blocks;
679fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
68bdaf78e0Sblueswir1 static int nb_tbs;
69eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
70c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
71fd6ce8f6Sbellard 
72141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
73141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
74141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
75d03d860bSblueswir1  section close to code segment. */
76d03d860bSblueswir1 #define code_gen_section                                \
77d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
78d03d860bSblueswir1     __attribute__((aligned (32)))
79f8e2af11SStefan Weil #elif defined(_WIN32)
80f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
81f8e2af11SStefan Weil #define code_gen_section                                \
82f8e2af11SStefan Weil     __attribute__((aligned (16)))
83d03d860bSblueswir1 #else
84d03d860bSblueswir1 #define code_gen_section                                \
85d03d860bSblueswir1     __attribute__((aligned (32)))
86d03d860bSblueswir1 #endif
87d03d860bSblueswir1 
88d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
89bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
90bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
9126a5f13bSbellard /* threshold to flush the translated code buffer */
92bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
93fd6ce8f6Sbellard uint8_t *code_gen_ptr;
94fd6ce8f6Sbellard 
95e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
969fa3e853Sbellard int phys_ram_fd;
971ccde1cbSbellard uint8_t *phys_ram_dirty;
9874576198Saliguori static int in_migration;
9994a6b54fSpbrook 
/* One contiguous chunk of guest RAM; kept in the singly linked
   ram_blocks list below.  */
typedef struct RAMBlock {
    uint8_t *host;           /* presumably the host pointer backing this block — usage not visible here, confirm at allocation site */
    ram_addr_t offset;       /* start offset of the block in the ram_addr_t space */
    ram_addr_t length;       /* size of the block in bytes */
    struct RAMBlock *next;   /* next block in the ram_blocks list */
} RAMBlock;
10694a6b54fSpbrook 
10794a6b54fSpbrook static RAMBlock *ram_blocks;
10894a6b54fSpbrook /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
109ccbb4d44SStuart Brady    then we can no longer assume contiguous ram offsets, and external uses
11094a6b54fSpbrook    of this variable will break.  */
111c227f099SAnthony Liguori ram_addr_t last_ram_offset;
112e2eef170Spbrook #endif
1139fa3e853Sbellard 
1146a00d601Sbellard CPUState *first_cpu;
1156a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1166a00d601Sbellard    cpu_exec() */
1176a00d601Sbellard CPUState *cpu_single_env;
1182e70f6efSpbrook /* 0 = Do not count executed instructions.
119bf20dc07Sths    1 = Precise instruction counting.
1202e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1212e70f6efSpbrook int use_icount = 0;
1222e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1232e70f6efSpbrook    include some instructions that have not yet been executed.  */
1242e70f6efSpbrook int64_t qemu_icount;
1256a00d601Sbellard 
/* Per-guest-page bookkeeping for the translator; stored in the
   multi-level l1_map below.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page flags, e.g. PAGE_RESERVED as set via page_set_flags()
       (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
13754936004Sbellard 
/* Per-physical-page mapping information (system emulation); stored in
   the multi-level l1_phys_map below.  */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* defaults to the page's own physical address; see the leaf
       initialization in phys_page_find_alloc() */
    ram_addr_t region_offset;
} PhysPageDesc;
14392e873b9Sbellard 
1445cd2c5b6SRichard Henderson /* In system mode we want L1_MAP to be based on physical addresses,
1455cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1465cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
1475cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
148bedb69eaSj_mayer #else
1495cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
150bedb69eaSj_mayer #endif
15154936004Sbellard 
1525cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1535cd2c5b6SRichard Henderson #define L2_BITS 10
15454936004Sbellard #define L2_SIZE (1 << L2_BITS)
15554936004Sbellard 
1565cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1575cd2c5b6SRichard Henderson #define P_L1_BITS_REM \
1585cd2c5b6SRichard Henderson     ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1595cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1605cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1615cd2c5b6SRichard Henderson 
1625cd2c5b6SRichard Henderson /* Size of the L1 page table.  Avoid silly small sizes.  */
1635cd2c5b6SRichard Henderson #if P_L1_BITS_REM < 4
1645cd2c5b6SRichard Henderson #define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
1655cd2c5b6SRichard Henderson #else
1665cd2c5b6SRichard Henderson #define P_L1_BITS  P_L1_BITS_REM
1675cd2c5b6SRichard Henderson #endif
1685cd2c5b6SRichard Henderson 
1695cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1705cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1715cd2c5b6SRichard Henderson #else
1725cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1735cd2c5b6SRichard Henderson #endif
1745cd2c5b6SRichard Henderson 
1755cd2c5b6SRichard Henderson #define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
1765cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1775cd2c5b6SRichard Henderson 
1785cd2c5b6SRichard Henderson #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
1795cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1805cd2c5b6SRichard Henderson 
18183fb7adfSbellard unsigned long qemu_real_host_page_size;
18283fb7adfSbellard unsigned long qemu_host_page_bits;
18383fb7adfSbellard unsigned long qemu_host_page_size;
18483fb7adfSbellard unsigned long qemu_host_page_mask;
18554936004Sbellard 
1865cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
1875cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
1885cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
18954936004Sbellard 
190e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1915cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
1925cd2c5b6SRichard Henderson    The bottom level has pointers to PhysPageDesc.  */
1935cd2c5b6SRichard Henderson static void *l1_phys_map[P_L1_SIZE];
1946d9a1304SPaul Brook 
195e2eef170Spbrook static void io_mem_init(void);
196e2eef170Spbrook 
19733417e70Sbellard /* io memory support */
19833417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
19933417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
200a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
201511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
2026658ffb8Spbrook static int io_mem_watch;
2036658ffb8Spbrook #endif
20433417e70Sbellard 
20534865134Sbellard /* log support */
2061e8b27caSJuha Riihimäki #ifdef WIN32
2071e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log";
2081e8b27caSJuha Riihimäki #else
209d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
2101e8b27caSJuha Riihimäki #endif
21134865134Sbellard FILE *logfile;
21234865134Sbellard int loglevel;
213e735b91cSpbrook static int log_append = 0;
21434865134Sbellard 
215e3db7226Sbellard /* statistics */
216e3db7226Sbellard static int tlb_flush_count;
217e3db7226Sbellard static int tb_flush_count;
218e3db7226Sbellard static int tb_phys_invalidate_count;
219e3db7226Sbellard 
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make [addr, addr + size) executable (as well as readable and
   writable), widening the range to whole host pages as mprotect()
   requires.  */
static void map_exec(void *addr, long size)
{
    unsigned long mask, first, last;

    mask = getpagesize() - 1;
    /* Round the start down and the end up to page boundaries.  */
    first = (unsigned long)addr & ~mask;
    last = ((unsigned long)addr + size + mask) & ~mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
2457cb69caeSbellard 
/* Determine the host page size/bits/mask and, in user-only builds on
   POSIX hosts, mark every range already mapped by the host process as
   PAGE_RESERVED so guest mappings cannot collide with them.  */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* Only default qemu_host_page_size if it has not been set to a
       non-zero value already.  */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and mask from the (assumed power-of-two) size.  */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        /* Parse /proc/self/maps and reserve every existing host
           mapping that falls inside the guest address space.  */
        f = fopen("/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Mapping extends beyond the guest address
                           space: reserve up to the very end.  */
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
    }
#endif
}
30354936004Sbellard 
/* Return the PageDesc for guest page 'index', walking the multi-level
   l1_map.  With alloc != 0 missing intermediate tables and the leaf
   PageDesc array are created on the way down; with alloc == 0 the
   function returns NULL as soon as a level is missing.  */
static PageDesc *page_find_alloc(target_ulong index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    /* Worst case: two reserve entries (addr, size) per level.  */
    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

    /* NOTE(review): mmap failure (MAP_FAILED) is not checked here;
       h2g_valid(MAP_FAILED) presumably fails, but P is still returned
       to the caller — verify upstream behavior.  */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    /* The table walk is complete, so it is now safe to mark the pages
       backing the freshly allocated tables as reserved.  */
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
37654936004Sbellard 
37700f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
37854936004Sbellard {
3795cd2c5b6SRichard Henderson     return page_find_alloc(index, 0);
38054936004Sbellard }
38154936004Sbellard 
3826d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
/* Return the PhysPageDesc for physical page 'index', walking the
   multi-level l1_phys_map.  With alloc != 0 missing levels are
   created; with alloc == 0 the function returns NULL as soon as a
   level is missing.  */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        /* qemu_malloc (not mallocz): every slot is initialized by the
           loop below.  */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* New leaf: every page starts out unassigned with its
           region_offset set to its own physical address.
           NOTE(review): 'index' is not masked to the start of this
           leaf, so slots below (index & (L2_SIZE - 1)) get offsets
           computed from the looked-up index rather than the leaf
           base — verify against upstream.  */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
42292e873b9Sbellard 
423c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
42492e873b9Sbellard {
425108c49b8Sbellard     return phys_page_find_alloc(index, 0);
42692e873b9Sbellard }
42792e873b9Sbellard 
428c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr);
429c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
4303a7d929eSbellard                                     target_ulong vaddr);
431c8a706feSpbrook #define mmap_lock() do { } while(0)
432c8a706feSpbrook #define mmap_unlock() do { } while(0)
4339fa3e853Sbellard #endif
434fd6ce8f6Sbellard 
4354369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4364369415fSbellard 
4374369415fSbellard #if defined(CONFIG_USER_ONLY)
438ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4394369415fSbellard    user mode. It will change when a dedicated libc will be used */
4404369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4414369415fSbellard #endif
4424369415fSbellard 
4434369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
4444369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
4454369415fSbellard #endif
4464369415fSbellard 
/* Allocate the buffer TCG emits translated code into and make it
   readable/writable/executable.  tb_size of zero selects a default
   size.  The buffer placement is host-specific: several hosts need it
   within direct-branch range of the generated code.  */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode default: a static buffer; just flip its protection.  */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4G so 32-bit displacements work.  */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Fallback: plain allocation, then mprotect it executable.  */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave room at the end of the buffer for one maximally-sized TB.  */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
53326a5f13bSbellard 
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size.  Initializes the code generator, the translation buffer, the
   page tables and (system mode only) the I/O memory handlers, in that
   order.  */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Start emitting at the beginning of the freshly allocated buffer.  */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
54726a5f13bSbellard 
5489656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5499656f324Spbrook 
550e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
551e7f4eff7SJuan Quintela {
552e7f4eff7SJuan Quintela     CPUState *env = opaque;
553e7f4eff7SJuan Quintela 
5543098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
5553098dba0Saurel32        version_id is increased. */
5563098dba0Saurel32     env->interrupt_request &= ~0x01;
5579656f324Spbrook     tlb_flush(env, 1);
5589656f324Spbrook 
5599656f324Spbrook     return 0;
5609656f324Spbrook }
561e7f4eff7SJuan Quintela 
/* Migration/state description for the state shared by every CPU
   model; cpu_common_post_load fixes up the loaded fields.  */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
5749656f324Spbrook #endif
5759656f324Spbrook 
576950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
577950f1472SGlauber Costa {
578950f1472SGlauber Costa     CPUState *env = first_cpu;
579950f1472SGlauber Costa 
580950f1472SGlauber Costa     while (env) {
581950f1472SGlauber Costa         if (env->cpu_index == cpu)
582950f1472SGlauber Costa             break;
583950f1472SGlauber Costa         env = env->next_cpu;
584950f1472SGlauber Costa     }
585950f1472SGlauber Costa 
586950f1472SGlauber Costa     return env;
587950f1472SGlauber Costa }
588950f1472SGlauber Costa 
/* Register a new virtual CPU: assign it the next free cpu_index,
   append it to the global first_cpu list, and hook it into
   savevm/vmstate handling (system mode).  */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* Serialize list updates against other user-mode threads.  */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* Walk to the end of the list, counting entries: the count is the
       new CPU's index.  */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    /* Link the CPU in only after it is fully initialized.  */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
618fd6ce8f6Sbellard 
6199fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
6209fa3e853Sbellard {
6219fa3e853Sbellard     if (p->code_bitmap) {
62259817ccbSbellard         qemu_free(p->code_bitmap);
6239fa3e853Sbellard         p->code_bitmap = NULL;
6249fa3e853Sbellard     }
6259fa3e853Sbellard     p->code_write_count = 0;
6269fa3e853Sbellard }
6279fa3e853Sbellard 
6285cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
6295cd2c5b6SRichard Henderson 
6305cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
6315cd2c5b6SRichard Henderson {
6325cd2c5b6SRichard Henderson     int i;
6335cd2c5b6SRichard Henderson 
6345cd2c5b6SRichard Henderson     if (*lp == NULL) {
6355cd2c5b6SRichard Henderson         return;
6365cd2c5b6SRichard Henderson     }
6375cd2c5b6SRichard Henderson     if (level == 0) {
6385cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
6395cd2c5b6SRichard Henderson         for (i = 0; i < L2_BITS; ++i) {
6405cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
6415cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
6425cd2c5b6SRichard Henderson         }
6435cd2c5b6SRichard Henderson     } else {
6445cd2c5b6SRichard Henderson         void **pp = *lp;
6455cd2c5b6SRichard Henderson         for (i = 0; i < L2_BITS; ++i) {
6465cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
6475cd2c5b6SRichard Henderson         }
6485cd2c5b6SRichard Henderson     }
6495cd2c5b6SRichard Henderson }
6505cd2c5b6SRichard Henderson 
651fd6ce8f6Sbellard static void page_flush_tb(void)
652fd6ce8f6Sbellard {
6535cd2c5b6SRichard Henderson     int i;
6545cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
6555cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
656fd6ce8f6Sbellard     }
657fd6ce8f6Sbellard }
658fd6ce8f6Sbellard 
659fd6ce8f6Sbellard /* flush all the translation blocks */
660d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
/* Discard every translation block: reset the TB array and code buffer,
   clear all per-CPU lookup caches and the physical hash table, and
   unlink TBs from the page table.  'env1' is only used for cpu_abort
   on internal corruption. */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* generated code must never have grown past the buffer end */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* invalidate the fast pc->TB lookup cache of every CPU */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* empty the physical-address hash table and the page lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* new code will be generated from the start of the buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
687fd6ce8f6Sbellard 
688fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
689fd6ce8f6Sbellard 
690bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
691fd6ce8f6Sbellard {
692fd6ce8f6Sbellard     TranslationBlock *tb;
693fd6ce8f6Sbellard     int i;
694fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
69599773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
69699773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
697fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
698fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
6990bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
7000bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
70199773bd4Spbrook                        address, (long)tb->pc, tb->size);
702fd6ce8f6Sbellard             }
703fd6ce8f6Sbellard         }
704fd6ce8f6Sbellard     }
705fd6ce8f6Sbellard }
706fd6ce8f6Sbellard 
707fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
708fd6ce8f6Sbellard static void tb_page_check(void)
709fd6ce8f6Sbellard {
710fd6ce8f6Sbellard     TranslationBlock *tb;
711fd6ce8f6Sbellard     int i, flags1, flags2;
712fd6ce8f6Sbellard 
71399773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
71499773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
715fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
716fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
717fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
718fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
71999773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
720fd6ce8f6Sbellard             }
721fd6ce8f6Sbellard         }
722fd6ce8f6Sbellard     }
723fd6ce8f6Sbellard }
724fd6ce8f6Sbellard 
725fd6ce8f6Sbellard #endif
726fd6ce8f6Sbellard 
/* Unlink 'tb' from a singly linked TB list whose 'next' pointer lives
   at byte offset 'next_offset' inside TranslationBlock (generic over
   which link field is used, e.g. phys_hash_next via offsetof).
   NOTE: loops forever if 'tb' is not actually on the list — callers
   must guarantee membership. */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            /* splice out: copy tb's next link into the predecessor slot */
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
741fd6ce8f6Sbellard 
/* Unlink 'tb' from a per-page TB list.  List pointers are tagged: the
   low 2 bits of each stored pointer select which page slot (0 or 1)
   of that TB the next link lives in, so the tag must be masked off
   before dereferencing.  Loops forever if 'tb' is absent. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                             /* tag = page slot */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);     /* strip tag */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
7589fa3e853Sbellard 
/* Remove entry 'n' (jump slot 0 or 1) of 'tb' from the circular list
   of TBs that jump to the same destination.  Pointers on this list are
   tagged in their low 2 bits with the jump-slot index; tag value 2
   marks the list head (jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: this entry is the destination TB's list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
786d4e8164fSbellard 
/* Reset the jump entry 'n' of TB 'tb' so that it is no longer chained
   to another TB: repoint the generated jump back at tb's own epilogue
   (tc_ptr + tb_next_offset[n]), i.e. fall back to the exit stub. */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
793d4e8164fSbellard 
/* Invalidate one TB: unlink it from the physical hash table, from the
   per-page lists (except the page 'page_addr', whose list the caller
   is already iterating; pass -1 to unlink from both pages), from every
   CPU's lookup cache, and from all jump chains in both directions. */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* tell cpu_exec that a TB it may be holding has been invalidated */
    tb_invalidated_flag = 1;

    /* remove the TB from every CPU's pc-based lookup cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the tagged circular
       list of incoming jumps (low 2 bits select the jump slot; tag 2
       terminates) and redirect each one back to its exit stub */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
8499fa3e853Sbellard 
/* Set bits [start, start + len) in the bit array 'tab'.
   Bit i lives in byte i >> 3 at bit position i & 7 (LSB-first). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;

    for (bit = start; bit < start + len; bit++) {
        tab[bit >> 3] |= 1 << (bit & 7);
    }
}
8769fa3e853Sbellard 
/* Build the SMC code bitmap of page 'p': one bit per byte of the page,
   set where translated code lives, so tb_invalidate_phys_page_fast can
   skip writes that do not touch code.  Allocates p->code_bitmap;
   ownership stays with the PageDesc (freed by invalidate_page_bitmap). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;   /* tagged pointer: low bits = page slot */
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: code starts at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
9049fa3e853Sbellard 
/* Translate guest code at (pc, cs_base, flags) into a new TB with the
   given cflags, link it into the physical page tables, and return it.
   If the TB pool is full the whole cache is flushed and translation
   retried (the second tb_alloc is assumed infallible after a flush). */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* generated host code goes at the current end of the code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may straddle two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
941d720b93dSbellard 
/* Invalidate all TBs which intersect with the target physical page
   range [start;end[.  NOTE: start and end must refer to the same
   physical page.  'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB if
   code is modified inside this TB (self-modifying code handling). */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* lazily locate the currently executing TB on first overlap hit */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough code writes to this page, build the per-byte code
       bitmap so the fast path can filter non-code writes */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;   /* tagged pointer: low bits = page slot */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* re-deliver any interrupt that arrived while
                   current_tb was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        /* does not return: longjmps back into the cpu execution loop */
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10529fa3e853Sbellard 
/* Fast path for a guest write of 'len' bytes at physical address
   'start': consult the page's code bitmap (if built) and only fall
   back to the full range invalidation when the write actually touches
   translated code.  len must be <= 8 and start must be a multiple of
   len (so the write cannot cross the 8-bit window read below). */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bitmap bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap yet: must assume the write may hit code */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
10799fa3e853Sbellard 
10809fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the physical page containing 'addr'
   (user-mode / no-softmmu variant used from the SIGSEGV write-protect
   handler).  'pc' is the faulting host pc (0 if unknown) and 'puc' the
   signal context, both forwarded to cpu_restore_state /
   cpu_resume_from_signal for precise SMC handling. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* identify the TB we were executing when the write faulted */
        current_tb = tb_find_pc(pc);
    }
#endif
    /* unlike the range variant, every TB on this page is invalidated */
    while (tb != NULL) {
        n = (long)tb & 3;   /* tagged pointer: low bits = page slot */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        /* does not return: longjmps back into the cpu execution loop */
        cpu_resume_from_signal(env, puc);
    }
#endif
}
11399fa3e853Sbellard #endif
1140fd6ce8f6Sbellard 
/* Add TB 'tb' to the list of page 'page_addr' in slot n (0 = first,
   1 = second page of a spanning TB) and write-protect the page so
   self-modifying code is detected: via mprotect in user mode, via the
   softmmu TLB otherwise. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page list; the slot index n is stored in the low
       bits of the tagged pointer */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: collect the
           union of their flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1195fd6ce8f6Sbellard 
1196fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1197fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1198c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1199fd6ce8f6Sbellard {
1200fd6ce8f6Sbellard     TranslationBlock *tb;
1201fd6ce8f6Sbellard 
120226a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
120326a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1204d4e8164fSbellard         return NULL;
1205fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1206fd6ce8f6Sbellard     tb->pc = pc;
1207b448f2f3Sbellard     tb->cflags = 0;
1208d4e8164fSbellard     return tb;
1209d4e8164fSbellard }
1210d4e8164fSbellard 
12112e70f6efSpbrook void tb_free(TranslationBlock *tb)
12122e70f6efSpbrook {
1213bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
12142e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
12152e70f6efSpbrook        be the last one generated.  */
12162e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
12172e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
12182e70f6efSpbrook         nb_tbs--;
12192e70f6efSpbrook     }
12202e70f6efSpbrook }
12212e70f6efSpbrook 
/* Add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB.  Also resets
   the TB's jump chains so it starts unlinked from other TBs. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list (one entry per page the TB spans) */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty incoming-jump list: tag 2 marks the list head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means slot unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1261fd6ce8f6Sbellard 
1262a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1263a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1264a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1265a513fe19Sbellard {
1266a513fe19Sbellard     int m_min, m_max, m;
1267a513fe19Sbellard     unsigned long v;
1268a513fe19Sbellard     TranslationBlock *tb;
1269a513fe19Sbellard 
1270a513fe19Sbellard     if (nb_tbs <= 0)
1271a513fe19Sbellard         return NULL;
1272a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1273a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1274a513fe19Sbellard         return NULL;
1275a513fe19Sbellard     /* binary search (cf Knuth) */
1276a513fe19Sbellard     m_min = 0;
1277a513fe19Sbellard     m_max = nb_tbs - 1;
1278a513fe19Sbellard     while (m_min <= m_max) {
1279a513fe19Sbellard         m = (m_min + m_max) >> 1;
1280a513fe19Sbellard         tb = &tbs[m];
1281a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1282a513fe19Sbellard         if (v == tc_ptr)
1283a513fe19Sbellard             return tb;
1284a513fe19Sbellard         else if (tc_ptr < v) {
1285a513fe19Sbellard             m_max = m - 1;
1286a513fe19Sbellard         } else {
1287a513fe19Sbellard             m_min = m + 1;
1288a513fe19Sbellard         }
1289a513fe19Sbellard     }
1290a513fe19Sbellard     return &tbs[m_max];
1291a513fe19Sbellard }
12927501267eSbellard 
1293ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1294ea041c0eSbellard 
/* Break the direct jump in slot n of tb: follow the tagged pointers in
   jmp_next (low 2 bits = jump slot, value 2 = list head) to locate the
   destination TB, unlink tb from that TB's incoming-jump list, patch the
   generated code back to its unchained form, and recursively unchain the
   destination as well. */
1295ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1296ea041c0eSbellard {
1297ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1298ea041c0eSbellard     unsigned int n1;
1299ea041c0eSbellard 
1300ea041c0eSbellard     tb1 = tb->jmp_next[n];
1301ea041c0eSbellard     if (tb1 != NULL) {
1302ea041c0eSbellard         /* find head of list */
1303ea041c0eSbellard         for(;;) {
1304ea041c0eSbellard             n1 = (long)tb1 & 3;
1305ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1306ea041c0eSbellard             if (n1 == 2)
1307ea041c0eSbellard                 break;
1308ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1309ea041c0eSbellard         }
1310ea041c0eSbellard         /* we are now sure now that tb jumps to tb1 */
1311ea041c0eSbellard         tb_next = tb1;
1312ea041c0eSbellard 
1313ea041c0eSbellard         /* remove tb from the jmp_first list */
1314ea041c0eSbellard         ptb = &tb_next->jmp_first;
1315ea041c0eSbellard         for(;;) {
1316ea041c0eSbellard             tb1 = *ptb;
1317ea041c0eSbellard             n1 = (long)tb1 & 3;
1318ea041c0eSbellard             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1319ea041c0eSbellard             if (n1 == n && tb1 == tb)
1320ea041c0eSbellard                 break;
1321ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1322ea041c0eSbellard         }
1323ea041c0eSbellard         *ptb = tb->jmp_next[n];
1324ea041c0eSbellard         tb->jmp_next[n] = NULL;
1325ea041c0eSbellard 
1326ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1327ea041c0eSbellard         tb_reset_jump(tb, n);
1328ea041c0eSbellard 
13290124311eSbellard         /* suppress jumps in the tb on which we could have jumped */
1330ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1331ea041c0eSbellard     }
1332ea041c0eSbellard }
1333ea041c0eSbellard 
/* Unchain both outgoing direct jumps of tb (and, transitively, of every
   TB reachable through them). */
1334ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1335ea041c0eSbellard {
1336ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1337ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1338ea041c0eSbellard }
1339ea041c0eSbellard 
13401fddef4bSbellard #if defined(TARGET_HAS_ICE)
134194df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode variant: guest virtual addresses map 1:1, so the TB(s)
   covering pc can be invalidated directly. */
134294df27fdSPaul Brook static void breakpoint_invalidate(CPUState *env, target_ulong pc)
134394df27fdSPaul Brook {
134494df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
134594df27fdSPaul Brook }
134694df27fdSPaul Brook #else
/* Softmmu variant: translate the virtual pc to its ram address via the
   physical page table, then invalidate the TB(s) covering that byte so
   the breakpoint is retranslated in. */
1347d720b93dSbellard static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1348d720b93dSbellard {
1349c227f099SAnthony Liguori     target_phys_addr_t addr;
13509b3c35e0Sj_mayer     target_ulong pd;
1351c227f099SAnthony Liguori     ram_addr_t ram_addr;
1352c2f07f81Spbrook     PhysPageDesc *p;
1353d720b93dSbellard 
1354c2f07f81Spbrook     addr = cpu_get_phys_page_debug(env, pc);
1355c2f07f81Spbrook     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1356c2f07f81Spbrook     if (!p) {
1357c2f07f81Spbrook         pd = IO_MEM_UNASSIGNED;
1358c2f07f81Spbrook     } else {
1359c2f07f81Spbrook         pd = p->phys_offset;
1360c2f07f81Spbrook     }
1361c2f07f81Spbrook     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1362706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1363d720b93dSbellard }
1364c27004ecSbellard #endif
136594df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1366d720b93dSbellard 
1367c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode stub: watchpoints are not supported, nothing to remove. */
1368c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369c527ee8fSPaul Brook 
1370c527ee8fSPaul Brook {
1371c527ee8fSPaul Brook }
1372c527ee8fSPaul Brook 
/* User-mode stub: watchpoints are not supported; always fails. */
1373c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1374c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1375c527ee8fSPaul Brook {
1376c527ee8fSPaul Brook     return -ENOSYS;
1377c527ee8fSPaul Brook }
1378c527ee8fSPaul Brook #else
13796658ffb8Spbrook /* Add a watchpoint.  */
/* Returns 0 on success (storing the new entry in *watchpoint when that
   pointer is non-NULL) or -EINVAL for an unsupported length or a
   misaligned address. */
1380a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1381a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
13826658ffb8Spbrook {
1383b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1384c0ce998eSaliguori     CPUWatchpoint *wp;
13856658ffb8Spbrook 
1386b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1387b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1388b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1389b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1390b4051334Saliguori         return -EINVAL;
1391b4051334Saliguori     }
1392a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
13936658ffb8Spbrook 
1394a1d1bb31Saliguori     wp->vaddr = addr;
1395b4051334Saliguori     wp->len_mask = len_mask;
1396a1d1bb31Saliguori     wp->flags = flags;
1397a1d1bb31Saliguori 
13982dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1399c0ce998eSaliguori     if (flags & BP_GDB)
140072cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1401c0ce998eSaliguori     else
140272cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1403a1d1bb31Saliguori 
    /* Drop the cached TLB entry for this page; presumably this forces the
       next access through the slow path where the watchpoint is seen --
       confirm against the softmmu access path. */
14046658ffb8Spbrook     tlb_flush_page(env, addr);
1405a1d1bb31Saliguori 
1406a1d1bb31Saliguori     if (watchpoint)
1407a1d1bb31Saliguori         *watchpoint = wp;
1408a1d1bb31Saliguori     return 0;
14096658ffb8Spbrook }
14106658ffb8Spbrook 
1411a1d1bb31Saliguori /* Remove a specific watchpoint.  */
/* Matches on address, length mask and flags (ignoring the transient
   BP_WATCHPOINT_HIT bit).  Returns 0 on success, -ENOENT if no such
   watchpoint exists. */
1412a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1413a1d1bb31Saliguori                           int flags)
14146658ffb8Spbrook {
1415b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1416a1d1bb31Saliguori     CPUWatchpoint *wp;
14176658ffb8Spbrook 
141872cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1419b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
14206e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1421a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
14226658ffb8Spbrook             return 0;
14236658ffb8Spbrook         }
14246658ffb8Spbrook     }
1425a1d1bb31Saliguori     return -ENOENT;
14266658ffb8Spbrook }
14276658ffb8Spbrook 
1428a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
/* Unlinks, flushes the TLB entry for the watched page, and frees the
   entry; the pointer is invalid afterwards. */
1429a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1430a1d1bb31Saliguori {
143172cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
14327d03f82fSedgar_igl 
1433a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1434a1d1bb31Saliguori 
1435a1d1bb31Saliguori     qemu_free(watchpoint);
14367d03f82fSedgar_igl }
14377d03f82fSedgar_igl 
1438a1d1bb31Saliguori /* Remove all matching watchpoints.  */
/* 'mask' is a flag filter (e.g. BP_GDB); the safe iterator is required
   because removal frees the current element. */
1439a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1440a1d1bb31Saliguori {
1441c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1442a1d1bb31Saliguori 
144372cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1444a1d1bb31Saliguori         if (wp->flags & mask)
1445a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1446a1d1bb31Saliguori     }
1447c0ce998eSaliguori }
1448c527ee8fSPaul Brook #endif
1449a1d1bb31Saliguori 
1450a1d1bb31Saliguori /* Add a breakpoint.  */
/* Returns 0 on success (storing the new entry in *breakpoint when that
   pointer is non-NULL), or -ENOSYS on targets without breakpoint
   support (no TARGET_HAS_ICE). */
1451a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1452a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
14534c3a88a2Sbellard {
14541fddef4bSbellard #if defined(TARGET_HAS_ICE)
1455c0ce998eSaliguori     CPUBreakpoint *bp;
14564c3a88a2Sbellard 
1457a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
14584c3a88a2Sbellard 
1459a1d1bb31Saliguori     bp->pc = pc;
1460a1d1bb31Saliguori     bp->flags = flags;
1461a1d1bb31Saliguori 
14622dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1463c0ce998eSaliguori     if (flags & BP_GDB)
146472cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1465c0ce998eSaliguori     else
146672cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1467d720b93dSbellard 
    /* Invalidate any translated code containing pc so the breakpoint
       takes effect on the next execution. */
1468d720b93dSbellard     breakpoint_invalidate(env, pc);
1469a1d1bb31Saliguori 
1470a1d1bb31Saliguori     if (breakpoint)
1471a1d1bb31Saliguori         *breakpoint = bp;
14724c3a88a2Sbellard     return 0;
14734c3a88a2Sbellard #else
1474a1d1bb31Saliguori     return -ENOSYS;
14754c3a88a2Sbellard #endif
14764c3a88a2Sbellard }
14774c3a88a2Sbellard 
1478a1d1bb31Saliguori /* Remove a specific breakpoint.  */
/* Matches on pc and exact flags.  Returns 0 on success, -ENOENT when no
   such breakpoint exists, -ENOSYS without TARGET_HAS_ICE. */
1479a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1480a1d1bb31Saliguori {
14817d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1482a1d1bb31Saliguori     CPUBreakpoint *bp;
1483a1d1bb31Saliguori 
148472cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1485a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1486a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1487a1d1bb31Saliguori             return 0;
14887d03f82fSedgar_igl         }
1489a1d1bb31Saliguori     }
1490a1d1bb31Saliguori     return -ENOENT;
1491a1d1bb31Saliguori #else
1492a1d1bb31Saliguori     return -ENOSYS;
14937d03f82fSedgar_igl #endif
14947d03f82fSedgar_igl }
14957d03f82fSedgar_igl 
1496a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
/* Unlinks, re-invalidates the translated code at the breakpoint's pc,
   and frees the entry; the pointer is invalid afterwards. */
1497a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
14984c3a88a2Sbellard {
14991fddef4bSbellard #if defined(TARGET_HAS_ICE)
150072cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1501d720b93dSbellard 
1502a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1503a1d1bb31Saliguori 
1504a1d1bb31Saliguori     qemu_free(breakpoint);
1505a1d1bb31Saliguori #endif
1506a1d1bb31Saliguori }
1507a1d1bb31Saliguori 
1508a1d1bb31Saliguori /* Remove all matching breakpoints. */
/* 'mask' is a flag filter (e.g. BP_GDB); the safe iterator is required
   because removal frees the current element. */
1509a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1510a1d1bb31Saliguori {
1511a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1512c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1513a1d1bb31Saliguori 
151472cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1515a1d1bb31Saliguori         if (bp->flags & mask)
1516a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1517c0ce998eSaliguori     }
15184c3a88a2Sbellard #endif
15194c3a88a2Sbellard }
15204c3a88a2Sbellard 
1521c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1522c33a346eSbellard    CPU loop after each instruction */
/* Under KVM the in-kernel guest-debug state is updated instead of
   flushing TCG translations. */
1523c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1524c33a346eSbellard {
15251fddef4bSbellard #if defined(TARGET_HAS_ICE)
1526c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1527c33a346eSbellard         env->singlestep_enabled = enabled;
1528e22a25c9Saliguori         if (kvm_enabled())
1529e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1530e22a25c9Saliguori         else {
1531ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
15329fa3e853Sbellard             /* XXX: only flush what is necessary */
15330124311eSbellard             tb_flush(env);
1534c33a346eSbellard         }
1535e22a25c9Saliguori     }
1536c33a346eSbellard #endif
1537c33a346eSbellard }
1538c33a346eSbellard 
153934865134Sbellard /* enable or disable low levels log */
/* Sets the global loglevel mask.  Opens the log file lazily on first
   enable (exiting the process if that fails) and closes it when the
   mask is cleared. */
154034865134Sbellard void cpu_set_log(int log_flags)
154134865134Sbellard {
154234865134Sbellard     loglevel = log_flags;
154334865134Sbellard     if (loglevel && !logfile) {
        /* First open truncates; subsequent opens append (log_append is
           set below once the file has been opened). */
154411fcfab4Spbrook         logfile = fopen(logfilename, log_append ? "a" : "w");
154534865134Sbellard         if (!logfile) {
154634865134Sbellard             perror(logfilename);
154734865134Sbellard             _exit(1);
154834865134Sbellard         }
15499fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
15509fa3e853Sbellard         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
15519fa3e853Sbellard         {
1552b55266b5Sblueswir1             static char logfile_buf[4096];
15539fa3e853Sbellard             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
15549fa3e853Sbellard         }
1555bf65f53fSFilip Navara #elif !defined(_WIN32)
1556bf65f53fSFilip Navara         /* Win32 doesn't support line-buffering and requires size >= 2 */
155734865134Sbellard         setvbuf(logfile, NULL, _IOLBF, 0);
15589fa3e853Sbellard #endif
1559e735b91cSpbrook         log_append = 1;
1560e735b91cSpbrook     }
1561e735b91cSpbrook     if (!loglevel && logfile) {
1562e735b91cSpbrook         fclose(logfile);
1563e735b91cSpbrook         logfile = NULL;
156434865134Sbellard     }
156534865134Sbellard }
156634865134Sbellard 
/* Change the log file name; any open log is closed and reopened under
   the new name via cpu_set_log().
   NOTE(review): the previous logfilename is never freed, so every call
   leaks one strdup'd string.  Freeing it blindly is unsafe if the
   initial value points to a string literal -- TODO confirm before
   fixing. */
156734865134Sbellard void cpu_set_log_filename(const char *filename)
156834865134Sbellard {
156934865134Sbellard     logfilename = strdup(filename);
1570e735b91cSpbrook     if (logfile) {
1571e735b91cSpbrook         fclose(logfile);
1572e735b91cSpbrook         logfile = NULL;
1573e735b91cSpbrook     }
1574e735b91cSpbrook     cpu_set_log(loglevel);
157534865134Sbellard }
1576c33a346eSbellard 
/* Force the CPU out of its current translated-code chain: if a TB is
   executing, clear current_tb and unchain it so control returns to the
   main loop promptly. */
15773098dba0Saurel32 static void cpu_unlink_tb(CPUState *env)
1578ea041c0eSbellard {
1579d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1580d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1581d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1582d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
15833098dba0Saurel32     TranslationBlock *tb;
1584c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
15853098dba0Saurel32 
1586cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
15873098dba0Saurel32     tb = env->current_tb;
15883098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
15893098dba0Saurel32        all the potentially executing TB */
1590f76cfe56SRiku Voipio     if (tb) {
15913098dba0Saurel32         env->current_tb = NULL;
15923098dba0Saurel32         tb_reset_jump_recursive(tb);
15933098dba0Saurel32     }
1594cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
15953098dba0Saurel32 }
15963098dba0Saurel32 
15973098dba0Saurel32 /* mask must never be zero, except for A20 change call */
/* Record the pending interrupt bits and make sure the CPU notices them:
   either by kicking a halted CPU from the iothread, by forcing the
   icount decrementer to expire, or by unchaining the running TB. */
15983098dba0Saurel32 void cpu_interrupt(CPUState *env, int mask)
15993098dba0Saurel32 {
16003098dba0Saurel32     int old_mask;
16013098dba0Saurel32 
16023098dba0Saurel32     old_mask = env->interrupt_request;
16033098dba0Saurel32     env->interrupt_request |= mask;
16043098dba0Saurel32 
16058edac960Saliguori #ifndef CONFIG_USER_ONLY
16068edac960Saliguori     /*
16078edac960Saliguori      * If called from iothread context, wake the target cpu in
16088edac960Saliguori      * case its halted.
16098edac960Saliguori      */
16108edac960Saliguori     if (!qemu_cpu_self(env)) {
16118edac960Saliguori         qemu_cpu_kick(env);
16128edac960Saliguori         return;
16138edac960Saliguori     }
16148edac960Saliguori #endif
16158edac960Saliguori 
16162e70f6efSpbrook     if (use_icount) {
        /* Saturate the 16-bit decrementer so the TB exits at the next
           icount check. */
1617266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
16182e70f6efSpbrook #ifndef CONFIG_USER_ONLY
        /* Newly raised interrupt bits outside an I/O instruction would
           make icount non-deterministic, hence the hard abort. */
16192e70f6efSpbrook         if (!can_do_io(env)
1620be214e6cSaurel32             && (mask & ~old_mask) != 0) {
16212e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
16222e70f6efSpbrook         }
16232e70f6efSpbrook #endif
16242e70f6efSpbrook     } else {
16253098dba0Saurel32         cpu_unlink_tb(env);
1626ea041c0eSbellard     }
16272e70f6efSpbrook }
1628ea041c0eSbellard 
/* Clear the given pending-interrupt bits; no kick or unchain needed. */
1629b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask)
1630b54ad049Sbellard {
1631b54ad049Sbellard     env->interrupt_request &= ~mask;
1632b54ad049Sbellard }
1633b54ad049Sbellard 
/* Request that the CPU leave the execution loop as soon as possible. */
16343098dba0Saurel32 void cpu_exit(CPUState *env)
16353098dba0Saurel32 {
16363098dba0Saurel32     env->exit_request = 1;
16373098dba0Saurel32     cpu_unlink_tb(env);
16383098dba0Saurel32 }
16393098dba0Saurel32 
/* Table of '-d' debug log items (mask, name, help text), terminated by
   an entry with a zero mask.  Consumed by cpu_str_to_log_mask(). */
1640c7cd6a37Sblueswir1 const CPULogItem cpu_log_items[] = {
1641f193c797Sbellard     { CPU_LOG_TB_OUT_ASM, "out_asm",
1642f193c797Sbellard       "show generated host assembly code for each compiled TB" },
1643f193c797Sbellard     { CPU_LOG_TB_IN_ASM, "in_asm",
1644f193c797Sbellard       "show target assembly code for each compiled TB" },
1645f193c797Sbellard     { CPU_LOG_TB_OP, "op",
164657fec1feSbellard       "show micro ops for each compiled TB" },
1647f193c797Sbellard     { CPU_LOG_TB_OP_OPT, "op_opt",
1648e01a1157Sblueswir1       "show micro ops "
1649e01a1157Sblueswir1 #ifdef TARGET_I386
1650e01a1157Sblueswir1       "before eflags optimization and "
1651f193c797Sbellard #endif
1652e01a1157Sblueswir1       "after liveness analysis" },
1653f193c797Sbellard     { CPU_LOG_INT, "int",
1654f193c797Sbellard       "show interrupts/exceptions in short format" },
1655f193c797Sbellard     { CPU_LOG_EXEC, "exec",
1656f193c797Sbellard       "show trace before each executed TB (lots of logs)" },
16579fddaa0cSbellard     { CPU_LOG_TB_CPU, "cpu",
1658e91c8a77Sths       "show CPU state before block translation" },
1659f193c797Sbellard #ifdef TARGET_I386
1660f193c797Sbellard     { CPU_LOG_PCALL, "pcall",
1661f193c797Sbellard       "show protected mode far calls/returns/exceptions" },
1662eca1bdf4Saliguori     { CPU_LOG_RESET, "cpu_reset",
1663eca1bdf4Saliguori       "show CPU state before CPU resets" },
1664f193c797Sbellard #endif
16658e3a9fd2Sbellard #ifdef DEBUG_IOPORT
1666fd872598Sbellard     { CPU_LOG_IOPORT, "ioport",
1667fd872598Sbellard       "show all i/o ports accesses" },
16688e3a9fd2Sbellard #endif
1669f193c797Sbellard     { 0, NULL, NULL },
1670f193c797Sbellard };
1671f193c797Sbellard 
1672f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY
/* List of registered CPUPhysMemoryClient callbacks, notified of memory
   layout / dirty-log changes by the cpu_notify_* helpers below. */
1673f6f3fbcaSMichael S. Tsirkin static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1674f6f3fbcaSMichael S. Tsirkin     = QLIST_HEAD_INITIALIZER(memory_client_list);
1675f6f3fbcaSMichael S. Tsirkin 
/* Broadcast a physical memory mapping change to all registered
   memory clients. */
1676f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1677f6f3fbcaSMichael S. Tsirkin 				  ram_addr_t size,
1678f6f3fbcaSMichael S. Tsirkin 				  ram_addr_t phys_offset)
1679f6f3fbcaSMichael S. Tsirkin {
1680f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1681f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1682f6f3fbcaSMichael S. Tsirkin         client->set_memory(client, start_addr, size, phys_offset);
1683f6f3fbcaSMichael S. Tsirkin     }
1684f6f3fbcaSMichael S. Tsirkin }
1685f6f3fbcaSMichael S. Tsirkin 
/* Ask every client to sync its dirty bitmap for [start, end); stops and
   returns the first negative client result, 0 on success. */
1686f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1687f6f3fbcaSMichael S. Tsirkin 					target_phys_addr_t end)
1688f6f3fbcaSMichael S. Tsirkin {
1689f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1690f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1691f6f3fbcaSMichael S. Tsirkin         int r = client->sync_dirty_bitmap(client, start, end);
1692f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1693f6f3fbcaSMichael S. Tsirkin             return r;
1694f6f3fbcaSMichael S. Tsirkin     }
1695f6f3fbcaSMichael S. Tsirkin     return 0;
1696f6f3fbcaSMichael S. Tsirkin }
1697f6f3fbcaSMichael S. Tsirkin 
/* Toggle migration dirty logging on every client; stops and returns the
   first negative client result, 0 on success. */
1698f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable)
1699f6f3fbcaSMichael S. Tsirkin {
1700f6f3fbcaSMichael S. Tsirkin     CPUPhysMemoryClient *client;
1701f6f3fbcaSMichael S. Tsirkin     QLIST_FOREACH(client, &memory_client_list, list) {
1702f6f3fbcaSMichael S. Tsirkin         int r = client->migration_log(client, enable);
1703f6f3fbcaSMichael S. Tsirkin         if (r < 0)
1704f6f3fbcaSMichael S. Tsirkin             return r;
1705f6f3fbcaSMichael S. Tsirkin     }
1706f6f3fbcaSMichael S. Tsirkin     return 0;
1707f6f3fbcaSMichael S. Tsirkin }
1708f6f3fbcaSMichael S. Tsirkin 
17095cd2c5b6SRichard Henderson static void phys_page_for_each_1(CPUPhysMemoryClient *client,
17105cd2c5b6SRichard Henderson                                  int level, void **lp)
1711f6f3fbcaSMichael S. Tsirkin {
17125cd2c5b6SRichard Henderson     int i;
1713f6f3fbcaSMichael S. Tsirkin 
17145cd2c5b6SRichard Henderson     if (*lp == NULL) {
17155cd2c5b6SRichard Henderson         return;
1716f6f3fbcaSMichael S. Tsirkin     }
17175cd2c5b6SRichard Henderson     if (level == 0) {
17185cd2c5b6SRichard Henderson         PhysPageDesc *pd = *lp;
17195cd2c5b6SRichard Henderson         for (i = 0; i < L2_BITS; ++i) {
17205cd2c5b6SRichard Henderson             if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
17215cd2c5b6SRichard Henderson                 client->set_memory(client, pd[i].region_offset,
17225cd2c5b6SRichard Henderson                                    TARGET_PAGE_SIZE, pd[i].phys_offset);
1723f6f3fbcaSMichael S. Tsirkin             }
17245cd2c5b6SRichard Henderson         }
17255cd2c5b6SRichard Henderson     } else {
17265cd2c5b6SRichard Henderson         void **pp = *lp;
17275cd2c5b6SRichard Henderson         for (i = 0; i < L2_BITS; ++i) {
17285cd2c5b6SRichard Henderson             phys_page_for_each_1(client, level - 1, pp + i);
1729f6f3fbcaSMichael S. Tsirkin         }
1730f6f3fbcaSMichael S. Tsirkin     }
1731f6f3fbcaSMichael S. Tsirkin }
1732f6f3fbcaSMichael S. Tsirkin 
1733f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client)
1734f6f3fbcaSMichael S. Tsirkin {
17355cd2c5b6SRichard Henderson     int i;
17365cd2c5b6SRichard Henderson     for (i = 0; i < P_L1_SIZE; ++i) {
17375cd2c5b6SRichard Henderson         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
17385cd2c5b6SRichard Henderson                              l1_phys_map + 1);
1739f6f3fbcaSMichael S. Tsirkin     }
1740f6f3fbcaSMichael S. Tsirkin }
1741f6f3fbcaSMichael S. Tsirkin 
/* Register a memory client and immediately replay the current physical
   memory layout to it. */
1742f6f3fbcaSMichael S. Tsirkin void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1743f6f3fbcaSMichael S. Tsirkin {
1744f6f3fbcaSMichael S. Tsirkin     QLIST_INSERT_HEAD(&memory_client_list, client, list);
1745f6f3fbcaSMichael S. Tsirkin     phys_page_for_each(client);
1746f6f3fbcaSMichael S. Tsirkin }
1747f6f3fbcaSMichael S. Tsirkin 
/* Unregister a memory client; the caller retains ownership of 'client'. */
1748f6f3fbcaSMichael S. Tsirkin void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1749f6f3fbcaSMichael S. Tsirkin {
1750f6f3fbcaSMichael S. Tsirkin     QLIST_REMOVE(client, list);
1751f6f3fbcaSMichael S. Tsirkin }
1752f6f3fbcaSMichael S. Tsirkin #endif
1753f6f3fbcaSMichael S. Tsirkin 
/* Return nonzero iff s2 is exactly n bytes long and matches the first
   n bytes of s1 (used to compare a comma-delimited token to a name). */
static int cmp1(const char *s1, int n, const char *s2)
{
    size_t want = (size_t)n;

    if (strlen(s2) != want) {
        return 0;
    }
    return memcmp(s1, s2, want) == 0;
}
1760f193c797Sbellard 
1761f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1762f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1763f193c797Sbellard {
1764c7cd6a37Sblueswir1     const CPULogItem *item;
1765f193c797Sbellard     int mask;
1766f193c797Sbellard     const char *p, *p1;
1767f193c797Sbellard 
1768f193c797Sbellard     p = str;
1769f193c797Sbellard     mask = 0;
1770f193c797Sbellard     for(;;) {
1771f193c797Sbellard         p1 = strchr(p, ',');
1772f193c797Sbellard         if (!p1)
1773f193c797Sbellard             p1 = p + strlen(p);
17748e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
17758e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
17768e3a9fd2Sbellard 			mask |= item->mask;
17778e3a9fd2Sbellard 		}
17788e3a9fd2Sbellard 	} else {
1779f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1780f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1781f193c797Sbellard                 goto found;
1782f193c797Sbellard         }
1783f193c797Sbellard         return 0;
17848e3a9fd2Sbellard 	}
1785f193c797Sbellard     found:
1786f193c797Sbellard         mask |= item->mask;
1787f193c797Sbellard         if (*p1 != ',')
1788f193c797Sbellard             break;
1789f193c797Sbellard         p = p1 + 1;
1790f193c797Sbellard     }
1791f193c797Sbellard     return mask;
1792f193c797Sbellard }
1793ea041c0eSbellard 
/* Report a fatal emulator error: print the formatted message and a CPU
   state dump to stderr (and to the log file when logging is enabled),
   then abort().  Never returns. */
17947501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...)
17957501267eSbellard {
17967501267eSbellard     va_list ap;
1797493ae1f0Spbrook     va_list ap2;
17987501267eSbellard 
17997501267eSbellard     va_start(ap, fmt);
    /* Two va_list copies: one for stderr, one for the log file. */
1800493ae1f0Spbrook     va_copy(ap2, ap);
18017501267eSbellard     fprintf(stderr, "qemu: fatal: ");
18027501267eSbellard     vfprintf(stderr, fmt, ap);
18037501267eSbellard     fprintf(stderr, "\n");
18047501267eSbellard #ifdef TARGET_I386
18057fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
18067fe48483Sbellard #else
18077fe48483Sbellard     cpu_dump_state(env, stderr, fprintf, 0);
18087501267eSbellard #endif
180993fcfe39Saliguori     if (qemu_log_enabled()) {
181093fcfe39Saliguori         qemu_log("qemu: fatal: ");
181193fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
181293fcfe39Saliguori         qemu_log("\n");
1813f9373291Sj_mayer #ifdef TARGET_I386
181493fcfe39Saliguori         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1815f9373291Sj_mayer #else
181693fcfe39Saliguori         log_cpu_state(env, 0);
1817f9373291Sj_mayer #endif
181831b1a7b4Saliguori         qemu_log_flush();
181993fcfe39Saliguori         qemu_log_close();
1820924edcaeSbalrog     }
1821493ae1f0Spbrook     va_end(ap2);
1822f9373291Sj_mayer     va_end(ap);
1823fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1824fd052bf6SRiku Voipio     {
        /* Restore the default SIGABRT disposition so abort() is not
           intercepted by a handler the emulated program installed. */
1825fd052bf6SRiku Voipio         struct sigaction act;
1826fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1827fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1828fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1829fd052bf6SRiku Voipio     }
1830fd052bf6SRiku Voipio #endif
18317501267eSbellard     abort();
18327501267eSbellard }
18337501267eSbellard 
1834c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1835c5be9f08Sths {
183601ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1837c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1838c5be9f08Sths     int cpu_index = new_env->cpu_index;
18395a38f081Saliguori #if defined(TARGET_HAS_ICE)
18405a38f081Saliguori     CPUBreakpoint *bp;
18415a38f081Saliguori     CPUWatchpoint *wp;
18425a38f081Saliguori #endif
18435a38f081Saliguori 
1844c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
18455a38f081Saliguori 
18465a38f081Saliguori     /* Preserve chaining and index. */
1847c5be9f08Sths     new_env->next_cpu = next_cpu;
1848c5be9f08Sths     new_env->cpu_index = cpu_index;
18495a38f081Saliguori 
18505a38f081Saliguori     /* Clone all break/watchpoints.
18515a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
18525a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
185372cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
185472cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
18555a38f081Saliguori #if defined(TARGET_HAS_ICE)
185672cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
18575a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
18585a38f081Saliguori     }
185972cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18605a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18615a38f081Saliguori                               wp->flags, NULL);
18625a38f081Saliguori     }
18635a38f081Saliguori #endif
18645a38f081Saliguori 
1865c5be9f08Sths     return new_env;
1866c5be9f08Sths }
1867c5be9f08Sths 
18680124311eSbellard #if !defined(CONFIG_USER_ONLY)
18690124311eSbellard 
18705c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
18715c751e99Sedgar_igl {
18725c751e99Sedgar_igl     unsigned int i;
18735c751e99Sedgar_igl 
18745c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
18755c751e99Sedgar_igl        overlap the flushed page.  */
    /* Two pages are cleared: a TB starting on the previous page may spill
       into the flushed one, so its cache bucket must go as well. */
18765c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
18775c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18785c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18795c751e99Sedgar_igl 
18805c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
18815c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18825c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18835c751e99Sedgar_igl }
18845c751e99Sedgar_igl 
/* Template for an invalidated TLB entry: all address fields are -1 so
   that no page lookup can ever match it. */
188508738984SIgor Kovalenko static CPUTLBEntry s_cputlb_empty_entry = {
188608738984SIgor Kovalenko     .addr_read  = -1,
188708738984SIgor Kovalenko     .addr_write = -1,
188808738984SIgor Kovalenko     .addr_code  = -1,
188908738984SIgor Kovalenko     .addend     = -1,
189008738984SIgor Kovalenko };
189108738984SIgor Kovalenko 
1892ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1893ee8b7021Sbellard    implemented yet) */
1894ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
189533417e70Sbellard {
189633417e70Sbellard     int i;
18970124311eSbellard 
18989fa3e853Sbellard #if defined(DEBUG_TLB)
18999fa3e853Sbellard     printf("tlb_flush:\n");
19009fa3e853Sbellard #endif
19010124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19020124311eSbellard        links while we are modifying them */
19030124311eSbellard     env->current_tb = NULL;
19040124311eSbellard 
190533417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1906cfde4bd9SIsaku Yamahata         int mmu_idx;
1907cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
190808738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1909cfde4bd9SIsaku Yamahata         }
191033417e70Sbellard     }
19119fa3e853Sbellard 
19128a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
19139fa3e853Sbellard 
1914e3db7226Sbellard     tlb_flush_count++;
191533417e70Sbellard }
191633417e70Sbellard 
1917274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
191861382a50Sbellard {
191984b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
192084b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
192184b7b8e7Sbellard         addr == (tlb_entry->addr_write &
192284b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
192384b7b8e7Sbellard         addr == (tlb_entry->addr_code &
192484b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
192508738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
192684b7b8e7Sbellard     }
192761382a50Sbellard }
192861382a50Sbellard 
19292e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
193033417e70Sbellard {
19318a40a180Sbellard     int i;
1932cfde4bd9SIsaku Yamahata     int mmu_idx;
19330124311eSbellard 
19349fa3e853Sbellard #if defined(DEBUG_TLB)
1935108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
19369fa3e853Sbellard #endif
19370124311eSbellard     /* must reset current TB so that interrupts cannot modify the
19380124311eSbellard        links while we are modifying them */
19390124311eSbellard     env->current_tb = NULL;
194033417e70Sbellard 
194161382a50Sbellard     addr &= TARGET_PAGE_MASK;
194233417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
19450124311eSbellard 
19465c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
19479fa3e853Sbellard }
19489fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG forces writes to this page through the
       notdirty slow path, where self-modifying code is detected.  */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
19579fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Setting CODE_DIRTY_FLAG lets writes take the fast path again.
       'env' and 'vaddr' are unused here; kept for the callers.  */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
19659fa3e853Sbellard 
19661ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
19671ccde1cbSbellard                                          unsigned long start, unsigned long length)
19681ccde1cbSbellard {
19691ccde1cbSbellard     unsigned long addr;
197084b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
197184b7b8e7Sbellard         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
19721ccde1cbSbellard         if ((addr - start) < length) {
19730f459d16Spbrook             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
19741ccde1cbSbellard         }
19751ccde1cbSbellard     }
19761ccde1cbSbellard }
19771ccde1cbSbellard 
/* Clear the given 'dirty_flags' bits for all physical pages in
   [start, end), then patch every CPU's TLB so the next write to the
   range re-enters the slow path and sets the bits again.
   Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested flag bits in the per-page dirty byte array.  */
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB slot of every CPU and redirect matching write
       entries through the notdirty slow path.  */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
20181ccde1cbSbellard 
201974576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
202074576198Saliguori {
2021f6f3fbcaSMichael S. Tsirkin     int ret = 0;
202274576198Saliguori     in_migration = enable;
2023f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_migration_log(!!enable);
2024f6f3fbcaSMichael S. Tsirkin     return ret;
202574576198Saliguori }
202674576198Saliguori 
/* Report whether dirty-memory tracking is currently enabled (the value
   last passed to cpu_physical_memory_set_dirty_tracking).  */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
203174576198Saliguori 
2032c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2033c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
20342bec46dcSaliguori {
20357b8f3b78SMichael S. Tsirkin     int ret;
2036151f7749SJan Kiszka 
2037f6f3fbcaSMichael S. Tsirkin     ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2038151f7749SJan Kiszka     return ret;
20392bec46dcSaliguori }
20402bec46dcSaliguori 
20413a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
20423a7d929eSbellard {
2043c227f099SAnthony Liguori     ram_addr_t ram_addr;
20445579c7f3Spbrook     void *p;
20453a7d929eSbellard 
204684b7b8e7Sbellard     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
20475579c7f3Spbrook         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
20485579c7f3Spbrook             + tlb_entry->addend);
20495579c7f3Spbrook         ram_addr = qemu_ram_addr_from_host(p);
20503a7d929eSbellard         if (!cpu_physical_memory_is_dirty(ram_addr)) {
20510f459d16Spbrook             tlb_entry->addr_write |= TLB_NOTDIRTY;
20523a7d929eSbellard         }
20533a7d929eSbellard     }
20543a7d929eSbellard }
20553a7d929eSbellard 
20563a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
20573a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
20583a7d929eSbellard {
20593a7d929eSbellard     int i;
2060cfde4bd9SIsaku Yamahata     int mmu_idx;
2061cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
20623a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
2063cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2064cfde4bd9SIsaku Yamahata     }
20653a7d929eSbellard }
20663a7d929eSbellard 
/* Re-enable direct writes through 'tlb_entry', but only if it is the
   write-notdirty mapping for exactly the page 'vaddr'.  */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
20721ccde1cbSbellard 
20730f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
20740f459d16Spbrook    so that it is no longer dirty */
20750f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
20761ccde1cbSbellard {
20771ccde1cbSbellard     int i;
2078cfde4bd9SIsaku Yamahata     int mmu_idx;
20791ccde1cbSbellard 
20800f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
20811ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2082cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2083cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
20841ccde1cbSbellard }
20851ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* Look up the physical page descriptor; pages with no descriptor
       are treated as unassigned I/O.  */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    /* Host address of the page; 'addend' lets fast-path loads add the
       guest vaddr directly to reach host memory.  */
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    /* Code fetches keep the pre-watchpoint address: watchpoints only
       affect data accesses.  */
    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill in the TLB slot for this page in the requested MMU mode.
       -1 in an address field means "no access of that kind".  */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM: trap the write so dirty bits get set.  */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
21889fa3e853Sbellard 
21890124311eSbellard #else
21900124311eSbellard 
/* User-mode emulation has no softmmu TLB, so flushing is a no-op.  */
void tlb_flush(CPUState *env, int flush_global)
{
}
21940124311eSbellard 
/* User-mode emulation has no softmmu TLB, so flushing is a no-op.  */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
21980124311eSbellard 
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Walk accumulator: 'start' and 'prot' describe the region currently
   being coalesced; start == -1ul means no region is open.  */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;   /* callback invoked per region */
    void *priv;                  /* opaque argument forwarded to fn */
    unsigned long start;         /* start of the open region, or -1ul */
    int prot;                    /* protection flags of the open region */
};
22119fa3e853Sbellard 
22125cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
22135cd2c5b6SRichard Henderson                                    unsigned long end, int new_prot)
22145cd2c5b6SRichard Henderson {
22155cd2c5b6SRichard Henderson     if (data->start != -1ul) {
22165cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
22175cd2c5b6SRichard Henderson         if (rc != 0) {
22185cd2c5b6SRichard Henderson             return rc;
22195cd2c5b6SRichard Henderson         }
22205cd2c5b6SRichard Henderson     }
2221edf8e2afSMika Westerberg 
22225cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
22235cd2c5b6SRichard Henderson     data->prot = new_prot;
22245cd2c5b6SRichard Henderson 
22255cd2c5b6SRichard Henderson     return 0;
222633417e70Sbellard }
22275cd2c5b6SRichard Henderson 
/* Recursively walk one node of the page-table radix tree rooted at
   *lp, which covers guest addresses starting at 'base'.  At level 0
   the node is an array of PageDesc; above that it is an array of
   child pointers.  Returns the first non-zero callback result, or 0.
   NOTE(review): both loops bound 'i' by L2_BITS; from the surrounding
   shift arithmetic (L2_BITS * level) this looks like it should be the
   node size (1 << L2_BITS), not the bit count — verify against the
   L2_BITS/L2_SIZE macro definitions earlier in this file.  */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 unsigned long base, int level, void **lp)
{
    unsigned long pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at its base address.  */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_BITS; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* Protection changed: emit the previous region and open
               a new one starting here.  */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_BITS; ++i) {
            pa = base | (i << (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
22645cd2c5b6SRichard Henderson 
22655cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
22665cd2c5b6SRichard Henderson {
22675cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
22685cd2c5b6SRichard Henderson     unsigned long i;
22695cd2c5b6SRichard Henderson 
22705cd2c5b6SRichard Henderson     data.fn = fn;
22715cd2c5b6SRichard Henderson     data.priv = priv;
22725cd2c5b6SRichard Henderson     data.start = -1ul;
22735cd2c5b6SRichard Henderson     data.prot = 0;
22745cd2c5b6SRichard Henderson 
22755cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
22765cd2c5b6SRichard Henderson         int rc = walk_memory_regions_1(&data, i << V_L1_SHIFT,
22775cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
22785cd2c5b6SRichard Henderson         if (rc != 0) {
22795cd2c5b6SRichard Henderson             return rc;
22805cd2c5b6SRichard Henderson         }
22815cd2c5b6SRichard Henderson     }
22825cd2c5b6SRichard Henderson 
22835cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2284edf8e2afSMika Westerberg }
2285edf8e2afSMika Westerberg 
2286edf8e2afSMika Westerberg static int dump_region(void *priv, unsigned long start,
2287edf8e2afSMika Westerberg     unsigned long end, unsigned long prot)
2288edf8e2afSMika Westerberg {
2289edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2290edf8e2afSMika Westerberg 
2291edf8e2afSMika Westerberg     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2292edf8e2afSMika Westerberg         start, end, end - start,
2293edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2294edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2295edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2296edf8e2afSMika Westerberg 
2297edf8e2afSMika Westerberg     return (0);
2298edf8e2afSMika Westerberg }
2299edf8e2afSMika Westerberg 
2300edf8e2afSMika Westerberg /* dump memory mappings */
2301edf8e2afSMika Westerberg void page_dump(FILE *f)
2302edf8e2afSMika Westerberg {
2303edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2304edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2305edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
23069fa3e853Sbellard }
23079fa3e853Sbellard 
230853a5960aSpbrook int page_get_flags(target_ulong address)
23099fa3e853Sbellard {
23109fa3e853Sbellard     PageDesc *p;
23119fa3e853Sbellard 
23129fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
23139fa3e853Sbellard     if (!p)
23149fa3e853Sbellard         return 0;
23159fa3e853Sbellard     return p->flags;
23169fa3e853Sbellard }
23179fa3e853Sbellard 
23189fa3e853Sbellard /* modify the flags of a page and invalidate the code if
2319ccbb4d44SStuart Brady    necessary. The flag PAGE_WRITE_ORG is positioned automatically
23209fa3e853Sbellard    depending on PAGE_WRITE */
232153a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
23229fa3e853Sbellard {
23239fa3e853Sbellard     PageDesc *p;
232453a5960aSpbrook     target_ulong addr;
23259fa3e853Sbellard 
2326c8a706feSpbrook     /* mmap_lock should already be held.  */
23279fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
23289fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
23299fa3e853Sbellard     if (flags & PAGE_WRITE)
23309fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
23319fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
23329fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
233317e2377aSpbrook         /* We may be called for host regions that are outside guest
233417e2377aSpbrook            address space.  */
233517e2377aSpbrook         if (!p)
233617e2377aSpbrook             return;
23379fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
23389fa3e853Sbellard            inside */
23399fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
23409fa3e853Sbellard             (flags & PAGE_WRITE) &&
23419fa3e853Sbellard             p->first_tb) {
2342d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
23439fa3e853Sbellard         }
23449fa3e853Sbellard         p->flags = flags;
23459fa3e853Sbellard     }
23469fa3e853Sbellard }
23479fa3e853Sbellard 
23483d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
23493d97b40bSths {
23503d97b40bSths     PageDesc *p;
23513d97b40bSths     target_ulong end;
23523d97b40bSths     target_ulong addr;
23533d97b40bSths 
235455f280c9Sbalrog     if (start + len < start)
235555f280c9Sbalrog         /* we've wrapped around */
235655f280c9Sbalrog         return -1;
235755f280c9Sbalrog 
23583d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
23593d97b40bSths     start = start & TARGET_PAGE_MASK;
23603d97b40bSths 
23613d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
23623d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
23633d97b40bSths         if( !p )
23643d97b40bSths             return -1;
23653d97b40bSths         if( !(p->flags & PAGE_VALID) )
23663d97b40bSths             return -1;
23673d97b40bSths 
2368dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
23693d97b40bSths             return -1;
2370dae3270cSbellard         if (flags & PAGE_WRITE) {
2371dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
23723d97b40bSths                 return -1;
2373dae3270cSbellard             /* unprotect the page if it was put read-only because it
2374dae3270cSbellard                contains translated code */
2375dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2376dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2377dae3270cSbellard                     return -1;
2378dae3270cSbellard             }
2379dae3270cSbellard             return 0;
2380dae3270cSbellard         }
23813d97b40bSths     }
23823d97b40bSths     return 0;
23833d97b40bSths }
23843d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* OR together the flags of every target page sharing this host
       page, since mprotect works at host-page granularity.  */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
24339fa3e853Sbellard 
/* User-mode emulation has no softmmu TLB: nothing to mark dirty.  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
24389fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
243933417e70Sbellard 
2440e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
24418da3ff18Spbrook 
/* Byte offset of 'addr' within its target page (sub-page index).  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A physical page split into regions with different memory handlers.
   Tables are indexed by byte offset within the page and by a second
   index of size 4 / 2 — presumably access size and read-vs-write
   respectively; confirm against subpage_register below.  */
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
2450c04b2b78SPaul Brook 
/* Forward declarations for the subpage helpers defined later in this
   file; needed by cpu_register_physical_memory_offset below.  */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the sub-page byte range [start_addr2, end_addr2] of the page
   at 'addr' that is covered by [start_addr, start_addr + orig_size),
   and set 'need_subpage' when the page is only partially covered.
   NOTE: unhygienic macro — it also reads 'orig_size' from the
   expansion site even though that name is not a parameter.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2474db7b5426Sblueswir1 
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    /* Tell registered listeners (e.g. KVM) about the new mapping
       before the page descriptors are rewritten. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to whole target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* The page already has a mapping: if the new range does not
               cover the full page (or the handler is sub-width), demote
               the page to a subpage container and register into it. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage container: reuse it. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(/ROMD) mappings each page maps a distinct
                   phys_offset; advance it page by page. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* No existing mapping: allocate a fresh page descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    /* Partial page: the uncovered part stays unassigned. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
256333417e70Sbellard 
2564ba863458Sbellard /* XXX: temporary until new memory mapping API */
2565c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2566ba863458Sbellard {
2567ba863458Sbellard     PhysPageDesc *p;
2568ba863458Sbellard 
2569ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2570ba863458Sbellard     if (!p)
2571ba863458Sbellard         return IO_MEM_UNASSIGNED;
2572ba863458Sbellard     return p->phys_offset;
2573ba863458Sbellard }
2574ba863458Sbellard 
2575c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2576f65ed4c1Saliguori {
2577f65ed4c1Saliguori     if (kvm_enabled())
2578f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2579f65ed4c1Saliguori }
2580f65ed4c1Saliguori 
2581c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2582f65ed4c1Saliguori {
2583f65ed4c1Saliguori     if (kvm_enabled())
2584f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2585f65ed4c1Saliguori }
2586f65ed4c1Saliguori 
/* Drain any pending coalesced-MMIO accesses buffered by KVM.  A no-op
   when KVM is not in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
259262a2744cSSheng Yang 
2593c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2594c902760fSMarcelo Tosatti 
2595c902760fSMarcelo Tosatti #include <sys/vfs.h>
2596c902760fSMarcelo Tosatti 
#define HUGETLBFS_MAGIC       0x958458f6

/* Return the block size of the filesystem holding 'path' (the huge
   page size on a hugetlbfs mount), or 0 if statfs fails.  Warns, but
   still returns the size, when the path is not on hugetlbfs. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* Retry if the syscall is interrupted by a signal. */
    do {
        err = statfs(path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
        perror("statfs");
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2618c902760fSMarcelo Tosatti 
/* Allocate 'memory' bytes backed by an unlinked temporary file created
   under 'path' (intended for a hugetlbfs mount; see -mem-path).
   Returns the mapped host address, or NULL on any failure so the
   caller can decide how to proceed.
   NOTE(review): on success the fd is never closed — the mapping keeps
   the pages alive after unlink, but confirm the fd leak is intended. */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
	return NULL;
    }

    /* Refuse requests smaller than a single huge page. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
	return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
	perror("mkstemp");
	free(filename);
	return NULL;
    }
    /* Unlink right away so the backing file disappears automatically
       once the mapping goes away. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
	perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
	perror("file_ram_alloc: can't mmap RAM pages");
	close(fd);
	return (NULL);
    }
    return area;
}
2684c902760fSMarcelo Tosatti #endif
2685c902760fSMarcelo Tosatti 
/* Allocate a new guest RAM block of 'size' bytes (rounded up to the
   target page size) and return its offset within the global RAM space.
   Backing host memory comes from hugetlbfs when -mem-path was given,
   otherwise from an anonymous allocation. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Let KSM merge identical guest pages where available. */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Prepend the block to the global list. */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2733e9a1ab19Sbellard 
2734c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2735e9a1ab19Sbellard {
273694a6b54fSpbrook     /* TODO: implement this.  */
2737e9a1ab19Sbellard }
2738e9a1ab19Sbellard 
2739dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
27405579c7f3Spbrook    With the exception of the softmmu code in this file, this should
27415579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
27425579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
27435579c7f3Spbrook 
27445579c7f3Spbrook    It should not be used for general purpose DMA.
27455579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
27465579c7f3Spbrook  */
2747c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
2748dc828ca1Spbrook {
274994a6b54fSpbrook     RAMBlock *prev;
275094a6b54fSpbrook     RAMBlock **prevp;
275194a6b54fSpbrook     RAMBlock *block;
275294a6b54fSpbrook 
275394a6b54fSpbrook     prev = NULL;
275494a6b54fSpbrook     prevp = &ram_blocks;
275594a6b54fSpbrook     block = ram_blocks;
275694a6b54fSpbrook     while (block && (block->offset > addr
275794a6b54fSpbrook                      || block->offset + block->length <= addr)) {
275894a6b54fSpbrook         if (prev)
275994a6b54fSpbrook           prevp = &prev->next;
276094a6b54fSpbrook         prev = block;
276194a6b54fSpbrook         block = block->next;
276294a6b54fSpbrook     }
276394a6b54fSpbrook     if (!block) {
276494a6b54fSpbrook         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
276594a6b54fSpbrook         abort();
276694a6b54fSpbrook     }
276794a6b54fSpbrook     /* Move this entry to to start of the list.  */
276894a6b54fSpbrook     if (prev) {
276994a6b54fSpbrook         prev->next = block->next;
277094a6b54fSpbrook         block->next = *prevp;
277194a6b54fSpbrook         *prevp = block;
277294a6b54fSpbrook     }
277394a6b54fSpbrook     return block->host + (addr - block->offset);
2774dc828ca1Spbrook }
2775dc828ca1Spbrook 
27765579c7f3Spbrook /* Some of the softmmu routines need to translate from a host pointer
27775579c7f3Spbrook    (typically a TLB entry) back to a ram offset.  */
2778c227f099SAnthony Liguori ram_addr_t qemu_ram_addr_from_host(void *ptr)
27795579c7f3Spbrook {
278094a6b54fSpbrook     RAMBlock *prev;
278194a6b54fSpbrook     RAMBlock *block;
278294a6b54fSpbrook     uint8_t *host = ptr;
278394a6b54fSpbrook 
278494a6b54fSpbrook     prev = NULL;
278594a6b54fSpbrook     block = ram_blocks;
278694a6b54fSpbrook     while (block && (block->host > host
278794a6b54fSpbrook                      || block->host + block->length <= host)) {
278894a6b54fSpbrook         prev = block;
278994a6b54fSpbrook         block = block->next;
279094a6b54fSpbrook     }
279194a6b54fSpbrook     if (!block) {
279294a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
279394a6b54fSpbrook         abort();
279494a6b54fSpbrook     }
279594a6b54fSpbrook     return block->offset + (host - block->host);
27965579c7f3Spbrook }
27975579c7f3Spbrook 
/* 1-byte read from an address with no device or RAM mapped.  Reads as
   zero, except on SPARC/MicroBlaze which raise their target-specific
   unassigned-access fault. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2808e18231a3Sblueswir1 
/* 2-byte read from an address with no device or RAM mapped.  Reads as
   zero, except on SPARC/MicroBlaze which raise their target-specific
   unassigned-access fault. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2819e18231a3Sblueswir1 
/* 4-byte read from an address with no device or RAM mapped.  Reads as
   zero, except on SPARC/MicroBlaze which raise their target-specific
   unassigned-access fault. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
283033417e70Sbellard 
/* 1-byte write to an address with no device or RAM mapped.  The value
   is discarded, except on SPARC/MicroBlaze which fault (second
   do_unassigned_access argument distinguishes writes from reads). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2840e18231a3Sblueswir1 
/* 2-byte write to an address with no device or RAM mapped.  The value
   is discarded, except on SPARC/MicroBlaze which fault. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2850e18231a3Sblueswir1 
/* 4-byte write to an address with no device or RAM mapped.  The value
   is discarded, except on SPARC/MicroBlaze which fault. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
286033417e70Sbellard 
/* Byte/word/long dispatch table for unassigned-memory reads. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
286633417e70Sbellard 
/* Byte/word/long dispatch table for unassigned-memory writes. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
287233417e70Sbellard 
/* 1-byte write handler installed for RAM pages whose dirty flags are
   not all set: invalidates any translated code on the page, performs
   the store, marks the page dirty, and restores a direct RAM TLB entry
   once nothing on the page needs tracking. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Translated code may live on this page: flush it before the
           byte is modified, then reload the (possibly updated) flags. */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
28921ccde1cbSbellard 
/* 2-byte variant of notdirty_mem_writeb: invalidate TBs on the page,
   store, mark dirty, and drop the callback when fully dirty. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Flush translated code before the word is modified. */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
29121ccde1cbSbellard 
/* 4-byte variant of notdirty_mem_writeb: invalidate TBs on the page,
   store, mark dirty, and drop the callback when fully dirty. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Flush translated code before the long is modified. */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
29321ccde1cbSbellard 
/* Placeholder read table for I/O slots that only register write
   handlers (e.g. the notdirty entry); these must never be invoked. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
29383a7d929eSbellard 
/* Byte/word/long dispatch table for the dirty-tracking write handlers. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
29441ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
   'offset' is the access's offset within its page, 'len_mask' encodes
   the access width, and 'flags' is BP_MEM_READ or BP_MEM_WRITE. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access from the
       faulting page recorded in mem_io_vaddr plus the page offset. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Restore CPU state to the faulting instruction and
                   invalidate the TB so execution can stop there. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-instruction TB so the debug
                       interrupt fires right after the access. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
29890f459d16Spbrook 
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x0: 1-byte access, any alignment. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
29986658ffb8Spbrook 
/* 2-byte watchpoint-checked read; falls through to lduw_phys. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x1: 2-byte access. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
30046658ffb8Spbrook 
/* 4-byte watchpoint-checked read; falls through to ldl_phys. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    /* len_mask ~0x3: 4-byte access. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
30106658ffb8Spbrook 
/* 1-byte watchpoint-checked write; falls through to stb_phys. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
30176658ffb8Spbrook 
/* 2-byte watchpoint-checked write; falls through to stw_phys. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
30246658ffb8Spbrook 
/* 4-byte watchpoint-checked write; falls through to stl_phys. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
30316658ffb8Spbrook 
/* Byte/word/long dispatch table for watchpoint-checked reads. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
30376658ffb8Spbrook 
/* Byte/word/long dispatch table for watchpoint-checked writes. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
30436658ffb8Spbrook 
3044c227f099SAnthony Liguori static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3045db7b5426Sblueswir1                                  unsigned int len)
3046db7b5426Sblueswir1 {
3047db7b5426Sblueswir1     uint32_t ret;
3048db7b5426Sblueswir1     unsigned int idx;
3049db7b5426Sblueswir1 
30508da3ff18Spbrook     idx = SUBPAGE_IDX(addr);
3051db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3052db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3053db7b5426Sblueswir1            mmio, len, addr, idx);
3054db7b5426Sblueswir1 #endif
30558da3ff18Spbrook     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
30568da3ff18Spbrook                                        addr + mmio->region_offset[idx][0][len]);
3057db7b5426Sblueswir1 
3058db7b5426Sblueswir1     return ret;
3059db7b5426Sblueswir1 }
3060db7b5426Sblueswir1 
3061c227f099SAnthony Liguori static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3062db7b5426Sblueswir1                               uint32_t value, unsigned int len)
3063db7b5426Sblueswir1 {
3064db7b5426Sblueswir1     unsigned int idx;
3065db7b5426Sblueswir1 
30668da3ff18Spbrook     idx = SUBPAGE_IDX(addr);
3067db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3068db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3069db7b5426Sblueswir1            mmio, len, addr, idx, value);
3070db7b5426Sblueswir1 #endif
30718da3ff18Spbrook     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
30728da3ff18Spbrook                                   addr + mmio->region_offset[idx][1][len],
30738da3ff18Spbrook                                   value);
3074db7b5426Sblueswir1 }
3075db7b5426Sblueswir1 
3076c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3077db7b5426Sblueswir1 {
3078db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3079db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3080db7b5426Sblueswir1 #endif
3081db7b5426Sblueswir1 
3082db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 0);
3083db7b5426Sblueswir1 }
3084db7b5426Sblueswir1 
3085c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3086db7b5426Sblueswir1                             uint32_t value)
3087db7b5426Sblueswir1 {
3088db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3089db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3090db7b5426Sblueswir1 #endif
3091db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 0);
3092db7b5426Sblueswir1 }
3093db7b5426Sblueswir1 
3094c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3095db7b5426Sblueswir1 {
3096db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3097db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3098db7b5426Sblueswir1 #endif
3099db7b5426Sblueswir1 
3100db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 1);
3101db7b5426Sblueswir1 }
3102db7b5426Sblueswir1 
3103c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr,
3104db7b5426Sblueswir1                             uint32_t value)
3105db7b5426Sblueswir1 {
3106db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3107db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3108db7b5426Sblueswir1 #endif
3109db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 1);
3110db7b5426Sblueswir1 }
3111db7b5426Sblueswir1 
3112c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3113db7b5426Sblueswir1 {
3114db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3115db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3116db7b5426Sblueswir1 #endif
3117db7b5426Sblueswir1 
3118db7b5426Sblueswir1     return subpage_readlen(opaque, addr, 2);
3119db7b5426Sblueswir1 }
3120db7b5426Sblueswir1 
3121db7b5426Sblueswir1 static void subpage_writel (void *opaque,
3122c227f099SAnthony Liguori                          target_phys_addr_t addr, uint32_t value)
3123db7b5426Sblueswir1 {
3124db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3125db7b5426Sblueswir1     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3126db7b5426Sblueswir1 #endif
3127db7b5426Sblueswir1     subpage_writelen(opaque, addr, value, 2);
3128db7b5426Sblueswir1 }
3129db7b5426Sblueswir1 
3130d60efc6bSBlue Swirl static CPUReadMemoryFunc * const subpage_read[] = {
3131db7b5426Sblueswir1     &subpage_readb,
3132db7b5426Sblueswir1     &subpage_readw,
3133db7b5426Sblueswir1     &subpage_readl,
3134db7b5426Sblueswir1 };
3135db7b5426Sblueswir1 
3136d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const subpage_write[] = {
3137db7b5426Sblueswir1     &subpage_writeb,
3138db7b5426Sblueswir1     &subpage_writew,
3139db7b5426Sblueswir1     &subpage_writel,
3140db7b5426Sblueswir1 };
3141db7b5426Sblueswir1 
/* Route the subpage byte range [start, end] (offsets within a single
   target page) to the I/O handlers registered under the encoded
   phys_offset `memory`.  `region_offset` is stored per-slot and added
   to the access address when a handler is invoked.
   Returns 0 on success, -1 if either bound exceeds the page size. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* strip the flag bits: recover the plain io_mem table index */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* i ranges over the four access-size slots of the io_mem tables */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                /* middle index: 0 = read direction, 1 = write direction */
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
3174db7b5426Sblueswir1 
3175c227f099SAnthony Liguori static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3176c227f099SAnthony Liguori                            ram_addr_t orig_memory, ram_addr_t region_offset)
3177db7b5426Sblueswir1 {
3178c227f099SAnthony Liguori     subpage_t *mmio;
3179db7b5426Sblueswir1     int subpage_memory;
3180db7b5426Sblueswir1 
3181c227f099SAnthony Liguori     mmio = qemu_mallocz(sizeof(subpage_t));
31821eec614bSaliguori 
3183db7b5426Sblueswir1     mmio->base = base;
31841eed09cbSAvi Kivity     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3185db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3186db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3187db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3188db7b5426Sblueswir1 #endif
3189db7b5426Sblueswir1     *phys = subpage_memory | IO_MEM_SUBPAGE;
31908da3ff18Spbrook     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
31918da3ff18Spbrook                          region_offset);
3192db7b5426Sblueswir1 
3193db7b5426Sblueswir1     return mmio;
3194db7b5426Sblueswir1 }
3195db7b5426Sblueswir1 
319688715657Saliguori static int get_free_io_mem_idx(void)
319788715657Saliguori {
319888715657Saliguori     int i;
319988715657Saliguori 
320088715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
320188715657Saliguori         if (!io_mem_used[i]) {
320288715657Saliguori             io_mem_used[i] = 1;
320388715657Saliguori             return i;
320488715657Saliguori         }
3205c6703b47SRiku Voipio     fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
320688715657Saliguori     return -1;
320788715657Saliguori }
320888715657Saliguori 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* allocate a fresh slot; propagates -1 when the table is full */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* caller passed an encoded phys_offset: recover the table index.
           NOTE(review): io_mem_used[] is not marked here for fixed
           indices; io_mem_init() reserves the low slots by hand --
           confirm no other caller passes a fixed index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* a missing handler for any width flags the whole region as
           subwidth so the core splits accesses of that size */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* re-encode the table index as a phys_offset-style value */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
324261382a50Sbellard 
3243d60efc6bSBlue Swirl int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3244d60efc6bSBlue Swirl                            CPUWriteMemoryFunc * const *mem_write,
32451eed09cbSAvi Kivity                            void *opaque)
32461eed09cbSAvi Kivity {
32471eed09cbSAvi Kivity     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
32481eed09cbSAvi Kivity }
32491eed09cbSAvi Kivity 
325088715657Saliguori void cpu_unregister_io_memory(int io_table_address)
325188715657Saliguori {
325288715657Saliguori     int i;
325388715657Saliguori     int io_index = io_table_address >> IO_MEM_SHIFT;
325488715657Saliguori 
325588715657Saliguori     for (i=0;i < 3; i++) {
325688715657Saliguori         io_mem_read[io_index][i] = unassigned_mem_read[i];
325788715657Saliguori         io_mem_write[io_index][i] = unassigned_mem_write[i];
325888715657Saliguori     }
325988715657Saliguori     io_mem_opaque[io_index] = NULL;
326088715657Saliguori     io_mem_used[io_index] = 0;
326188715657Saliguori }
326288715657Saliguori 
/* Install the built-in I/O regions at their fixed indices, reserve the
   low io_mem slots, then register the watchpoint region. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* mark the first five slots as taken so get_free_io_mem_idx() never
       hands them out -- they are reserved for the fixed IO_MEM_*
       encodings (only three are registered explicitly above) */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3276e9179ce1SAvi Kivity 
3277e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3278e2eef170Spbrook 
327913eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
328013eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* Debugger access to guest memory (user-mode emulation variant):
   copy `len` bytes between host `buf` and guest virtual address `addr`.
   Returns 0 on success, -1 if any touched page is unmapped or lacks
   the required read/write permission.  `env` is unused here. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current guest page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            /* length l: flush the written bytes back to guest memory */
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* length 0: nothing to copy back after a read */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
33198df1cd07Sbellard 
332013eb76e0Sbellard #else
/* Slow-path guest physical memory access: copy `len` bytes between
   `buf` and physical address `addr`, page by page.  RAM pages are
   memcpy'd directly (with dirty tracking and TB invalidation on
   writes); MMIO pages are split into the widest naturally aligned
   32/16/8-bit handler calls. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            /* > IO_MEM_ROM means device region; ROMD reads come from RAM */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
34178df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
/* Unlike cpu_physical_memory_rw(), this ignores the read-only nature of
   ROM pages and silently skips device (MMIO) pages, so it is only
   suitable for loading images at machine setup time.  Note that the
   dirty bitmap is not updated here. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* only RAM, ROM and ROMD pages are writable by this helper */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3456d0ecd2aaSbellard 
/* Bounce buffer used by cpu_physical_memory_map() when the target page
   is MMIO rather than RAM; there is a single global instance, so only
   one such mapping can be outstanding at a time (buffer != NULL marks
   it busy). */
typedef struct {
    void *buffer;               /* host buffer; NULL when free */
    target_phys_addr_t addr;    /* guest physical base of the mapping */
    target_phys_addr_t len;     /* length of the mapped region */
} BounceBuffer;

static BounceBuffer bounce;
34646d16c2f8Saliguori 
/* Callback registration for clients waiting for the bounce buffer to
   become free again (see cpu_register_map_client()). */
typedef struct MapClient {
    void *opaque;                    /* passed back to callback */
    void (*callback)(void *opaque);  /* invoked when a retry may succeed */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* list of all currently registered map clients */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3473ba223c29Saliguori 
3474ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3475ba223c29Saliguori {
3476ba223c29Saliguori     MapClient *client = qemu_malloc(sizeof(*client));
3477ba223c29Saliguori 
3478ba223c29Saliguori     client->opaque = opaque;
3479ba223c29Saliguori     client->callback = callback;
348072cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3481ba223c29Saliguori     return client;
3482ba223c29Saliguori }
3483ba223c29Saliguori 
3484ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3485ba223c29Saliguori {
3486ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3487ba223c29Saliguori 
348872cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
348934d5e948SIsaku Yamahata     qemu_free(client);
3490ba223c29Saliguori }
3491ba223c29Saliguori 
3492ba223c29Saliguori static void cpu_notify_map_clients(void)
3493ba223c29Saliguori {
3494ba223c29Saliguori     MapClient *client;
3495ba223c29Saliguori 
349672cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
349772cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3498ba223c29Saliguori         client->callback(client->opaque);
349934d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3500ba223c29Saliguori     }
3501ba223c29Saliguori }
3502ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* non-RAM page: fall back to the single global bounce
               buffer; give up if it is busy or we already mapped RAM */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* pre-fill the bounce buffer for the caller to read */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* stop once the host mapping is no longer contiguous */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
35646d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* direct RAM mapping: only dirty tracking needs updating */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            /* walk the written range in page-sized steps */
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        /* bounce buffer was used: flush the written bytes to the guest */
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    /* bounce buffer is free again: wake up any waiting map clients */
    cpu_notify_map_clients();
}
3600d0ecd2aaSbellard 
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory: device (MMIO) pages
   go through the registered I/O read handler, RAM/ROM/ROMD pages are
   read directly from host memory. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* > IO_MEM_ROM means device region; ROMD reads come from RAM */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
36328df1cd07Sbellard 
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory.  For device (MMIO)
   pages the access is split into two 32-bit handler reads, combined in
   target byte order; RAM/ROM/ROMD pages are read directly. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* > IO_MEM_ROM means device region; ROMD reads come from RAM */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* split into two 32-bit reads, high/low order per target
           endianness */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
367084b7b8e7Sbellard 
3671aab33094Sbellard /* XXX: optimize */
3672c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3673aab33094Sbellard {
3674aab33094Sbellard     uint8_t val;
3675aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3676aab33094Sbellard     return val;
3677aab33094Sbellard }
3678aab33094Sbellard 
3679aab33094Sbellard /* XXX: optimize */
3680c227f099SAnthony Liguori uint32_t lduw_phys(target_phys_addr_t addr)
3681aab33094Sbellard {
3682aab33094Sbellard     uint16_t val;
3683aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3684aab33094Sbellard     return tswap16(val);
3685aab33094Sbellard }
3686aab33094Sbellard 
36878df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
36888df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
36898df1cd07Sbellard    bits are used to track modified PTEs */
3690c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
36918df1cd07Sbellard {
36928df1cd07Sbellard     int io_index;
36938df1cd07Sbellard     uint8_t *ptr;
36948df1cd07Sbellard     unsigned long pd;
36958df1cd07Sbellard     PhysPageDesc *p;
36968df1cd07Sbellard 
    /* Map the guest physical page to its backing descriptor; unmapped pages
       are treated as unassigned I/O. */
36978df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
36988df1cd07Sbellard     if (!p) {
36998df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
37008df1cd07Sbellard     } else {
37018df1cd07Sbellard         pd = p->phys_offset;
37028df1cd07Sbellard     }
37038df1cd07Sbellard 
    /* Anything that is not plain RAM goes through the device write hook
       (index [2] = 4-byte access size). */
37043a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
37058df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37068da3ff18Spbrook         if (p)
37078da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
37088df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
37098df1cd07Sbellard     } else {
371074576198Saliguori         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
37115579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
37128df1cd07Sbellard         stl_p(ptr, val);
371374576198Saliguori 
        /* During live migration the page must still be flagged dirty so that
           it gets resent; only in that case does this helper fall back to the
           normal invalidate-and-mark behaviour of stl_phys(). */
371474576198Saliguori         if (unlikely(in_migration)) {
371574576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
371674576198Saliguori                 /* invalidate code */
371774576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
371874576198Saliguori                 /* set dirty bit */
371974576198Saliguori                 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
372074576198Saliguori                     (0xff & ~CODE_DIRTY_FLAG);
372174576198Saliguori             }
372274576198Saliguori         }
37238df1cd07Sbellard     }
37248df1cd07Sbellard }
37258df1cd07Sbellard 
/* warning: addr must be aligned.  64-bit variant of stl_phys_notdirty():
   stores to RAM without setting dirty bits or invalidating translated code.
   Note that, unlike the 32-bit variant, this one has no in_migration
   special case in the RAM path. */
3726c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3727bc98a7efSj_mayer {
3728bc98a7efSj_mayer     int io_index;
3729bc98a7efSj_mayer     uint8_t *ptr;
3730bc98a7efSj_mayer     unsigned long pd;
3731bc98a7efSj_mayer     PhysPageDesc *p;
3732bc98a7efSj_mayer 
    /* Resolve the page descriptor; unmapped pages become unassigned I/O. */
3733bc98a7efSj_mayer     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3734bc98a7efSj_mayer     if (!p) {
3735bc98a7efSj_mayer         pd = IO_MEM_UNASSIGNED;
3736bc98a7efSj_mayer     } else {
3737bc98a7efSj_mayer         pd = p->phys_offset;
3738bc98a7efSj_mayer     }
3739bc98a7efSj_mayer 
3740bc98a7efSj_mayer     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3741bc98a7efSj_mayer         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37428da3ff18Spbrook         if (p)
37438da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
    /* No 64-bit device callback slot exists, so the store is split into two
       32-bit writes, ordered according to the target's byte order. */
3744bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
3745bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3746bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3747bc98a7efSj_mayer #else
3748bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3749bc98a7efSj_mayer         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3750bc98a7efSj_mayer #endif
3751bc98a7efSj_mayer     } else {
        /* RAM case: direct store through the host pointer. */
37525579c7f3Spbrook         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3753bc98a7efSj_mayer             (addr & ~TARGET_PAGE_MASK);
3754bc98a7efSj_mayer         stq_p(ptr, val);
3755bc98a7efSj_mayer     }
3756bc98a7efSj_mayer }
3757bc98a7efSj_mayer 
37588df1cd07Sbellard /* warning: addr must be aligned */
/* Store a 32-bit value to guest physical memory.  On the RAM path this also
   maintains the dirty-page protocol: translated code covering the stored
   range is invalidated and the page's dirty bits are set. */
3759c227f099SAnthony Liguori void stl_phys(target_phys_addr_t addr, uint32_t val)
37608df1cd07Sbellard {
37618df1cd07Sbellard     int io_index;
37628df1cd07Sbellard     uint8_t *ptr;
37638df1cd07Sbellard     unsigned long pd;
37648df1cd07Sbellard     PhysPageDesc *p;
37658df1cd07Sbellard 
    /* Resolve the page descriptor; unmapped pages become unassigned I/O. */
37668df1cd07Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
37678df1cd07Sbellard     if (!p) {
37688df1cd07Sbellard         pd = IO_MEM_UNASSIGNED;
37698df1cd07Sbellard     } else {
37708df1cd07Sbellard         pd = p->phys_offset;
37718df1cd07Sbellard     }
37728df1cd07Sbellard 
    /* Anything that is not plain RAM goes through the device write hook. */
37733a7d929eSbellard     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
37748df1cd07Sbellard         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
37758da3ff18Spbrook         if (p)
37768da3ff18Spbrook             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
37778df1cd07Sbellard         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
37788df1cd07Sbellard     } else {
37798df1cd07Sbellard         unsigned long addr1;
37808df1cd07Sbellard         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
37818df1cd07Sbellard         /* RAM case */
37825579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
37838df1cd07Sbellard         stl_p(ptr, val);
        /* First write to a clean page: drop any TBs translated from it and
           set every dirty flag except CODE_DIRTY_FLAG (which is managed by
           the TB invalidation machinery). */
37843a7d929eSbellard         if (!cpu_physical_memory_is_dirty(addr1)) {
37858df1cd07Sbellard             /* invalidate code */
37868df1cd07Sbellard             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
37878df1cd07Sbellard             /* set dirty bit */
3788f23db169Sbellard             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3789f23db169Sbellard                 (0xff & ~CODE_DIRTY_FLAG);
37908df1cd07Sbellard         }
37918df1cd07Sbellard     }
37923a7d929eSbellard }
37938df1cd07Sbellard 
3794aab33094Sbellard /* XXX: optimize */
3795c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3796aab33094Sbellard {
3797aab33094Sbellard     uint8_t v = val;
3798aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3799aab33094Sbellard }
3800aab33094Sbellard 
3801aab33094Sbellard /* XXX: optimize */
3802c227f099SAnthony Liguori void stw_phys(target_phys_addr_t addr, uint32_t val)
3803aab33094Sbellard {
3804aab33094Sbellard     uint16_t v = tswap16(val);
3805aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3806aab33094Sbellard }
3807aab33094Sbellard 
3808aab33094Sbellard /* XXX: optimize */
3809c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
3810aab33094Sbellard {
3811aab33094Sbellard     val = tswap64(val);
3812aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3813aab33094Sbellard }
3814aab33094Sbellard 
38155e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
/* Copy 'len' bytes between 'buf' and the guest virtual address 'addr' of
   'env', one page at a time.  Each page is translated with
   cpu_get_phys_page_debug(); writes go through
   cpu_physical_memory_write_rom() so that ROM regions are writable too.
   Returns 0 on success, -1 if any page has no physical mapping. */
3816b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3817b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
381813eb76e0Sbellard {
381913eb76e0Sbellard     int l;
3820c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
38219b3c35e0Sj_mayer     target_ulong page;
382213eb76e0Sbellard 
382313eb76e0Sbellard     while (len > 0) {
382413eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
382513eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
382613eb76e0Sbellard         /* if no physical page mapped, return an error */
382713eb76e0Sbellard         if (phys_addr == -1)
382813eb76e0Sbellard             return -1;
        /* Clamp the chunk to the end of the current page (and to 'len'). */
382913eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
383013eb76e0Sbellard         if (l > len)
383113eb76e0Sbellard             l = len;
38325e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
38335e2972fdSaliguori         if (is_write)
38345e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
38355e2972fdSaliguori         else
38365e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
383713eb76e0Sbellard         len -= l;
383813eb76e0Sbellard         buf += l;
383913eb76e0Sbellard         addr += l;
384013eb76e0Sbellard     }
384113eb76e0Sbellard     return 0;
384213eb76e0Sbellard }
3843a68fe89cSPaul Brook #endif
384413eb76e0Sbellard 
38452e70f6efSpbrook /* in deterministic execution mode, instructions doing device I/Os
38462e70f6efSpbrook    must be at the end of the TB */
/* Called from an I/O access taken inside a TB while icount is active:
   retranslates the current TB so that it ends exactly on the I/O
   instruction, then restarts execution.  'retaddr' is the host return
   address inside the generated code, used to locate the TB and the
   faulting guest instruction.  Does not return (resumes via
   cpu_resume_from_signal). */
38472e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr)
38482e70f6efSpbrook {
38492e70f6efSpbrook     TranslationBlock *tb;
38502e70f6efSpbrook     uint32_t n, cflags;
38512e70f6efSpbrook     target_ulong pc, cs_base;
38522e70f6efSpbrook     uint64_t flags;
38532e70f6efSpbrook 
38542e70f6efSpbrook     tb = tb_find_pc((unsigned long)retaddr);
38552e70f6efSpbrook     if (!tb) {
38562e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
38572e70f6efSpbrook                   retaddr);
38582e70f6efSpbrook     }
    /* Total instruction budget at TB entry, before restoring CPU state. */
38592e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
38602e70f6efSpbrook     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
38612e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
3862bf20dc07Sths        occurred.  */
38632e70f6efSpbrook     n = n - env->icount_decr.u16.low;
38642e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
38652e70f6efSpbrook     n++;
38662e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
38672e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
3868bf20dc07Sths        the first instruction in a TB then re-execute the preceding
38692e70f6efSpbrook        branch.  */
38702e70f6efSpbrook #if defined(TARGET_MIPS)
38712e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
38722e70f6efSpbrook         env->active_tc.PC -= 4;
38732e70f6efSpbrook         env->icount_decr.u16.low++;
38742e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
38752e70f6efSpbrook     }
38762e70f6efSpbrook #elif defined(TARGET_SH4)
38772e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
38782e70f6efSpbrook             && n > 1) {
38792e70f6efSpbrook         env->pc -= 2;
38802e70f6efSpbrook         env->icount_decr.u16.low++;
38812e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
38822e70f6efSpbrook     }
38832e70f6efSpbrook #endif
38842e70f6efSpbrook     /* This should never happen.  */
38852e70f6efSpbrook     if (n > CF_COUNT_MASK)
38862e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
38872e70f6efSpbrook 
    /* CF_LAST_IO forces the new TB to treat its final insn as I/O-capable. */
38882e70f6efSpbrook     cflags = n | CF_LAST_IO;
38892e70f6efSpbrook     pc = tb->pc;
38902e70f6efSpbrook     cs_base = tb->cs_base;
38912e70f6efSpbrook     flags = tb->flags;
38922e70f6efSpbrook     tb_phys_invalidate(tb, -1);
38932e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
38942e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
38952e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
3896bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
38972e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
38982e70f6efSpbrook        repeating the fault, which is horribly inefficient.
38992e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
39002e70f6efSpbrook        second new TB.  */
39012e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
39022e70f6efSpbrook }
39032e70f6efSpbrook 
/* Print translation-buffer and TCG statistics to 'f' using the supplied
   fprintf-like callback: code-cache usage, TB counts and sizes, cross-page
   and direct-jump TB ratios, plus flush/invalidate counters. */
3904e3db7226Sbellard void dump_exec_info(FILE *f,
3905e3db7226Sbellard                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3906e3db7226Sbellard {
3907e3db7226Sbellard     int i, target_code_size, max_target_code_size;
3908e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
3909e3db7226Sbellard     TranslationBlock *tb;
3910e3db7226Sbellard 
3911e3db7226Sbellard     target_code_size = 0;
3912e3db7226Sbellard     max_target_code_size = 0;
3913e3db7226Sbellard     cross_page = 0;
3914e3db7226Sbellard     direct_jmp_count = 0;
3915e3db7226Sbellard     direct_jmp2_count = 0;
    /* Single pass over all live TBs accumulating the statistics. */
3916e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
3917e3db7226Sbellard         tb = &tbs[i];
3918e3db7226Sbellard         target_code_size += tb->size;
3919e3db7226Sbellard         if (tb->size > max_target_code_size)
3920e3db7226Sbellard             max_target_code_size = tb->size;
        /* A second page address means the TB spans a guest page boundary. */
3921e3db7226Sbellard         if (tb->page_addr[1] != -1)
3922e3db7226Sbellard             cross_page++;
        /* 0xffff in tb_next_offset marks "no direct jump patched". */
3923e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
3924e3db7226Sbellard             direct_jmp_count++;
3925e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
3926e3db7226Sbellard                 direct_jmp2_count++;
3927e3db7226Sbellard             }
3928e3db7226Sbellard         }
3929e3db7226Sbellard     }
3930e3db7226Sbellard     /* XXX: avoid using doubles ? */
393157fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
393226a5f13bSbellard     cpu_fprintf(f, "gen code size       %ld/%ld\n",
393326a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
393426a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
393526a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
3936e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3937e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
3938e3db7226Sbellard                 max_target_code_size);
3939e3db7226Sbellard     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3940e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3941e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3942e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3943e3db7226Sbellard             cross_page,
3944e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3945e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3946e3db7226Sbellard                 direct_jmp_count,
3947e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3948e3db7226Sbellard                 direct_jmp2_count,
3949e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
395057fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
3951e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3952e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3953e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Delegate backend (host code generation) statistics to TCG. */
3954b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
3955e3db7226Sbellard }
3956e3db7226Sbellard 
395761382a50Sbellard #if !defined(CONFIG_USER_ONLY)
395861382a50Sbellard 
395961382a50Sbellard #define MMUSUFFIX _cmmu
396061382a50Sbellard #define GETPC() NULL
396161382a50Sbellard #define env cpu_single_env
3962b769d8feSbellard #define SOFTMMU_CODE_ACCESS
396361382a50Sbellard 
396461382a50Sbellard #define SHIFT 0
396561382a50Sbellard #include "softmmu_template.h"
396661382a50Sbellard 
396761382a50Sbellard #define SHIFT 1
396861382a50Sbellard #include "softmmu_template.h"
396961382a50Sbellard 
397061382a50Sbellard #define SHIFT 2
397161382a50Sbellard #include "softmmu_template.h"
397261382a50Sbellard 
397361382a50Sbellard #define SHIFT 3
397461382a50Sbellard #include "softmmu_template.h"
397561382a50Sbellard 
397661382a50Sbellard #undef env
397761382a50Sbellard 
397861382a50Sbellard #endif
3979