xref: /qemu/system/physmem.c (revision fd052bf63a2ee8e8aff9bb9a51ce7c5f744561f4)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard #include <stdlib.h>
2754936004Sbellard #include <stdio.h>
2854936004Sbellard #include <stdarg.h>
2954936004Sbellard #include <string.h>
3054936004Sbellard #include <errno.h>
3154936004Sbellard #include <unistd.h>
3254936004Sbellard #include <inttypes.h>
3354936004Sbellard 
346180a181Sbellard #include "cpu.h"
356180a181Sbellard #include "exec-all.h"
36ca10f867Saurel32 #include "qemu-common.h"
37b67d9a52Sbellard #include "tcg.h"
38b3c7724cSpbrook #include "hw/hw.h"
3974576198Saliguori #include "osdep.h"
407ba1e619Saliguori #include "kvm.h"
4153a5960aSpbrook #if defined(CONFIG_USER_ONLY)
4253a5960aSpbrook #include <qemu.h>
43fd052bf6SRiku Voipio #include <signal.h>
4453a5960aSpbrook #endif
4554936004Sbellard 
46fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
4766e85a21Sbellard //#define DEBUG_FLUSH
489fa3e853Sbellard //#define DEBUG_TLB
4967d3b957Spbrook //#define DEBUG_UNASSIGNED
50fd6ce8f6Sbellard 
51fd6ce8f6Sbellard /* make various TB consistency checks */
52fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
5398857888Sbellard //#define DEBUG_TLB_CHECK
54fd6ce8f6Sbellard 
551196be37Sths //#define DEBUG_IOPORT
56db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
571196be37Sths 
5899773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
5999773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
6099773bd4Spbrook #undef DEBUG_TB_CHECK
6199773bd4Spbrook #endif
6299773bd4Spbrook 
639fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
649fa3e853Sbellard 
65108c49b8Sbellard #if defined(TARGET_SPARC64)
66108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 41
675dcb6b91Sblueswir1 #elif defined(TARGET_SPARC)
685dcb6b91Sblueswir1 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69bedb69eaSj_mayer #elif defined(TARGET_ALPHA)
70bedb69eaSj_mayer #define TARGET_PHYS_ADDR_SPACE_BITS 42
71bedb69eaSj_mayer #define TARGET_VIRT_ADDR_SPACE_BITS 42
72108c49b8Sbellard #elif defined(TARGET_PPC64)
73108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 42
744a1418e0SAnthony Liguori #elif defined(TARGET_X86_64)
7500f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 42
764a1418e0SAnthony Liguori #elif defined(TARGET_I386)
7700f82b8aSaurel32 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78108c49b8Sbellard #else
79108c49b8Sbellard #define TARGET_PHYS_ADDR_SPACE_BITS 32
80108c49b8Sbellard #endif
81108c49b8Sbellard 
82bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8326a5f13bSbellard int code_gen_max_blocks;
849fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85bdaf78e0Sblueswir1 static int nb_tbs;
86eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
87c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88fd6ce8f6Sbellard 
89141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__)
90141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
92d03d860bSblueswir1  section close to code segment. */
93d03d860bSblueswir1 #define code_gen_section                                \
94d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
95d03d860bSblueswir1     __attribute__((aligned (32)))
96f8e2af11SStefan Weil #elif defined(_WIN32)
97f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */
98f8e2af11SStefan Weil #define code_gen_section                                \
99f8e2af11SStefan Weil     __attribute__((aligned (16)))
100d03d860bSblueswir1 #else
101d03d860bSblueswir1 #define code_gen_section                                \
102d03d860bSblueswir1     __attribute__((aligned (32)))
103d03d860bSblueswir1 #endif
104d03d860bSblueswir1 
105d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
106bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
107bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size;
10826a5f13bSbellard /* threshold to flush the translated code buffer */
109bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size;
110fd6ce8f6Sbellard uint8_t *code_gen_ptr;
111fd6ce8f6Sbellard 
112e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1139fa3e853Sbellard int phys_ram_fd;
1141ccde1cbSbellard uint8_t *phys_ram_dirty;
11574576198Saliguori static int in_migration;
11694a6b54fSpbrook 
11794a6b54fSpbrook typedef struct RAMBlock {
11894a6b54fSpbrook     uint8_t *host;
119c227f099SAnthony Liguori     ram_addr_t offset;
120c227f099SAnthony Liguori     ram_addr_t length;
12194a6b54fSpbrook     struct RAMBlock *next;
12294a6b54fSpbrook } RAMBlock;
12394a6b54fSpbrook 
12494a6b54fSpbrook static RAMBlock *ram_blocks;
12594a6b54fSpbrook /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126ccbb4d44SStuart Brady    then we can no longer assume contiguous ram offsets, and external uses
12794a6b54fSpbrook    of this variable will break.  */
128c227f099SAnthony Liguori ram_addr_t last_ram_offset;
129e2eef170Spbrook #endif
1309fa3e853Sbellard 
1316a00d601Sbellard CPUState *first_cpu;
1326a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1336a00d601Sbellard    cpu_exec() */
1346a00d601Sbellard CPUState *cpu_single_env;
1352e70f6efSpbrook /* 0 = Do not count executed instructions.
136bf20dc07Sths    1 = Precise instruction counting.
1372e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1382e70f6efSpbrook int use_icount = 0;
1392e70f6efSpbrook /* Current instruction counter.  While executing translated code this may
1402e70f6efSpbrook    include some instructions that have not yet been executed.  */
1412e70f6efSpbrook int64_t qemu_icount;
1426a00d601Sbellard 
14354936004Sbellard typedef struct PageDesc {
14492e873b9Sbellard     /* list of TBs intersecting this ram page */
145fd6ce8f6Sbellard     TranslationBlock *first_tb;
1469fa3e853Sbellard     /* in order to optimize self modifying code, we count the number
1479fa3e853Sbellard        of lookups we do to a given page to use a bitmap */
1489fa3e853Sbellard     unsigned int code_write_count;
1499fa3e853Sbellard     uint8_t *code_bitmap;
1509fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
1519fa3e853Sbellard     unsigned long flags;
1529fa3e853Sbellard #endif
15354936004Sbellard } PageDesc;
15454936004Sbellard 
15592e873b9Sbellard typedef struct PhysPageDesc {
1560f459d16Spbrook     /* offset in host memory of the page + io_index in the low bits */
157c227f099SAnthony Liguori     ram_addr_t phys_offset;
158c227f099SAnthony Liguori     ram_addr_t region_offset;
15992e873b9Sbellard } PhysPageDesc;
16092e873b9Sbellard 
16154936004Sbellard #define L2_BITS 10
162bedb69eaSj_mayer #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163bedb69eaSj_mayer /* XXX: this is a temporary hack for alpha target.
164bedb69eaSj_mayer  *      In the future, this is to be replaced by a multi-level table
165bedb69eaSj_mayer  *      to actually be able to handle the complete 64 bits address space.
166bedb69eaSj_mayer  */
167bedb69eaSj_mayer #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168bedb69eaSj_mayer #else
16903875444Saurel32 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170bedb69eaSj_mayer #endif
17154936004Sbellard 
17254936004Sbellard #define L1_SIZE (1 << L1_BITS)
17354936004Sbellard #define L2_SIZE (1 << L2_BITS)
17454936004Sbellard 
17583fb7adfSbellard unsigned long qemu_real_host_page_size;
17683fb7adfSbellard unsigned long qemu_host_page_bits;
17783fb7adfSbellard unsigned long qemu_host_page_size;
17883fb7adfSbellard unsigned long qemu_host_page_mask;
17954936004Sbellard 
18092e873b9Sbellard /* XXX: for system emulation, it could just be an array */
18154936004Sbellard static PageDesc *l1_map[L1_SIZE];
182bdaf78e0Sblueswir1 static PhysPageDesc **l1_phys_map;
18354936004Sbellard 
184e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
185e2eef170Spbrook static void io_mem_init(void);
186e2eef170Spbrook 
18733417e70Sbellard /* io memory support */
18833417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
18933417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES];
1926658ffb8Spbrook static int io_mem_watch;
1936658ffb8Spbrook #endif
19433417e70Sbellard 
19534865134Sbellard /* log support */
1961e8b27caSJuha Riihimäki #ifdef WIN32
1971e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log";
1981e8b27caSJuha Riihimäki #else
199d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log";
2001e8b27caSJuha Riihimäki #endif
20134865134Sbellard FILE *logfile;
20234865134Sbellard int loglevel;
203e735b91cSpbrook static int log_append = 0;
20434865134Sbellard 
205e3db7226Sbellard /* statistics */
206e3db7226Sbellard static int tlb_flush_count;
207e3db7226Sbellard static int tb_flush_count;
208e3db7226Sbellard static int tb_phys_invalidate_count;
209e3db7226Sbellard 
210db7b5426Sblueswir1 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
211c227f099SAnthony Liguori typedef struct subpage_t {
212c227f099SAnthony Liguori     target_phys_addr_t base;
213d60efc6bSBlue Swirl     CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
214d60efc6bSBlue Swirl     CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2153ee89922Sblueswir1     void *opaque[TARGET_PAGE_SIZE][2][4];
216c227f099SAnthony Liguori     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
217c227f099SAnthony Liguori } subpage_t;
218db7b5426Sblueswir1 
/* map_exec(): make the host memory range [addr, addr + size) executable
 * (read/write/execute) so that generated code placed there can be run.
 * Two variants are selected at compile time. */
2197cb69caeSbellard #ifdef _WIN32
/* Win32: VirtualProtect handles page rounding itself.  The return
 * status is ignored (historical behaviour) — a failure here would only
 * surface later as a crash when jumping into the buffer. */
2207cb69caeSbellard static void map_exec(void *addr, long size)
2217cb69caeSbellard {
2227cb69caeSbellard     DWORD old_protect;
2237cb69caeSbellard     VirtualProtect(addr, size,
2247cb69caeSbellard                    PAGE_EXECUTE_READWRITE, &old_protect);
2257cb69caeSbellard 
2267cb69caeSbellard }
2277cb69caeSbellard #else
/* POSIX: mprotect() requires page-aligned arguments, so round the
 * start down and the end up to host page boundaries first.
 * mprotect() failure is ignored (historical behaviour). */
2287cb69caeSbellard static void map_exec(void *addr, long size)
2297cb69caeSbellard {
2304369415fSbellard     unsigned long start, end, page_size;
2317cb69caeSbellard 
2324369415fSbellard     page_size = getpagesize();
2337cb69caeSbellard     start = (unsigned long)addr;
2344369415fSbellard     start &= ~(page_size - 1);
2357cb69caeSbellard 
2367cb69caeSbellard     end = (unsigned long)addr + size;
2374369415fSbellard     end += page_size - 1;
2384369415fSbellard     end &= ~(page_size - 1);
2397cb69caeSbellard 
2407cb69caeSbellard     mprotect((void *)start, end - start,
2417cb69caeSbellard              PROT_READ | PROT_WRITE | PROT_EXEC);
2427cb69caeSbellard }
2437cb69caeSbellard #endif
2447cb69caeSbellard 
/* One-time initialization of host/target page-size bookkeeping and the
 * physical page table root.  Computes qemu_real_host_page_size,
 * qemu_host_page_{size,bits,mask}, allocates the zeroed l1_phys_map
 * root, and (user-mode only) marks every host mapping already present
 * in /proc/self/maps as PAGE_RESERVED so the guest cannot reuse it. */
245b346ff46Sbellard static void page_init(void)
24654936004Sbellard {
24783fb7adfSbellard     /* NOTE: we can always suppose that qemu_host_page_size >=
24854936004Sbellard        TARGET_PAGE_SIZE */
249c2b48b69Saliguori #ifdef _WIN32
250c2b48b69Saliguori     {
251c2b48b69Saliguori         SYSTEM_INFO system_info;
252c2b48b69Saliguori 
253c2b48b69Saliguori         GetSystemInfo(&system_info);
254c2b48b69Saliguori         qemu_real_host_page_size = system_info.dwPageSize;
255c2b48b69Saliguori     }
256c2b48b69Saliguori #else
257c2b48b69Saliguori     qemu_real_host_page_size = getpagesize();
258c2b48b69Saliguori #endif
    /* qemu_host_page_size may already have been forced by the caller;
     * only default it when still zero, then clamp it to at least one
     * target page. */
25983fb7adfSbellard     if (qemu_host_page_size == 0)
26083fb7adfSbellard         qemu_host_page_size = qemu_real_host_page_size;
26183fb7adfSbellard     if (qemu_host_page_size < TARGET_PAGE_SIZE)
26283fb7adfSbellard         qemu_host_page_size = TARGET_PAGE_SIZE;
26383fb7adfSbellard     qemu_host_page_bits = 0;
26483fb7adfSbellard     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
26583fb7adfSbellard         qemu_host_page_bits++;
26683fb7adfSbellard     qemu_host_page_mask = ~(qemu_host_page_size - 1);
267108c49b8Sbellard     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
268108c49b8Sbellard     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
26950a9569bSbalrog 
27050a9569bSbalrog #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
27150a9569bSbalrog     {
27250a9569bSbalrog         long long startaddr, endaddr;
27350a9569bSbalrog         FILE *f;
27450a9569bSbalrog         int n;
27550a9569bSbalrog 
        /* mmap_lock protects page_set_flags against concurrent guest
         * mmap activity while we walk the host's mapping list. */
276c8a706feSpbrook         mmap_lock();
2770776590dSpbrook         last_brk = (unsigned long)sbrk(0);
27850a9569bSbalrog         f = fopen("/proc/self/maps", "r");
27950a9569bSbalrog         if (f) {
28050a9569bSbalrog             do {
28150a9569bSbalrog                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
28250a9569bSbalrog                 if (n == 2) {
                    /* Clamp to the guest-addressable range before
                     * converting to page flags. */
283e0b8d65aSblueswir1                     startaddr = MIN(startaddr,
284e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
285e0b8d65aSblueswir1                     endaddr = MIN(endaddr,
286e0b8d65aSblueswir1                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
287b5fc909eSpbrook                     page_set_flags(startaddr & TARGET_PAGE_MASK,
28850a9569bSbalrog                                    TARGET_PAGE_ALIGN(endaddr),
28950a9569bSbalrog                                    PAGE_RESERVED);
29050a9569bSbalrog                 }
29150a9569bSbalrog             } while (!feof(f));
29250a9569bSbalrog             fclose(f);
29350a9569bSbalrog         }
294c8a706feSpbrook         mmap_unlock();
29550a9569bSbalrog     }
29650a9569bSbalrog #endif
29754936004Sbellard }
29854936004Sbellard 
299434929bfSaliguori static inline PageDesc **page_l1_map(target_ulong index)
30054936004Sbellard {
30117e2377aSpbrook #if TARGET_LONG_BITS > 32
30217e2377aSpbrook     /* Host memory outside guest VM.  For 32-bit targets we have already
30317e2377aSpbrook        excluded high addresses.  */
304d8173e0fSths     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
30517e2377aSpbrook         return NULL;
30617e2377aSpbrook #endif
307434929bfSaliguori     return &l1_map[index >> L2_BITS];
308434929bfSaliguori }
309434929bfSaliguori 
310434929bfSaliguori static inline PageDesc *page_find_alloc(target_ulong index)
311434929bfSaliguori {
312434929bfSaliguori     PageDesc **lp, *p;
313434929bfSaliguori     lp = page_l1_map(index);
314434929bfSaliguori     if (!lp)
315434929bfSaliguori         return NULL;
316434929bfSaliguori 
31754936004Sbellard     p = *lp;
31854936004Sbellard     if (!p) {
31954936004Sbellard         /* allocate if not found */
32017e2377aSpbrook #if defined(CONFIG_USER_ONLY)
32117e2377aSpbrook         size_t len = sizeof(PageDesc) * L2_SIZE;
32217e2377aSpbrook         /* Don't use qemu_malloc because it may recurse.  */
323660f11beSBlue Swirl         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
32417e2377aSpbrook                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
32554936004Sbellard         *lp = p;
326fb1c2cd7Saurel32         if (h2g_valid(p)) {
327fb1c2cd7Saurel32             unsigned long addr = h2g(p);
32817e2377aSpbrook             page_set_flags(addr & TARGET_PAGE_MASK,
32917e2377aSpbrook                            TARGET_PAGE_ALIGN(addr + len),
33017e2377aSpbrook                            PAGE_RESERVED);
33117e2377aSpbrook         }
33217e2377aSpbrook #else
33317e2377aSpbrook         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
33417e2377aSpbrook         *lp = p;
33517e2377aSpbrook #endif
33654936004Sbellard     }
33754936004Sbellard     return p + (index & (L2_SIZE - 1));
33854936004Sbellard }
33954936004Sbellard 
34000f82b8aSaurel32 static inline PageDesc *page_find(target_ulong index)
34154936004Sbellard {
342434929bfSaliguori     PageDesc **lp, *p;
343434929bfSaliguori     lp = page_l1_map(index);
344434929bfSaliguori     if (!lp)
345434929bfSaliguori         return NULL;
34654936004Sbellard 
347434929bfSaliguori     p = *lp;
348660f11beSBlue Swirl     if (!p) {
349660f11beSBlue Swirl         return NULL;
350660f11beSBlue Swirl     }
351fd6ce8f6Sbellard     return p + (index & (L2_SIZE - 1));
35254936004Sbellard }
35354936004Sbellard 
/* Look up (and optionally allocate) the PhysPageDesc for physical page
 * number 'index'.  When 'alloc' is zero, returns NULL instead of
 * allocating missing table levels.  With wide physical address spaces
 * an extra top level is compiled in; each freshly allocated leaf table
 * is initialized to IO_MEM_UNASSIGNED with identity region offsets. */
354c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
35592e873b9Sbellard {
356108c49b8Sbellard     void **lp, **p;
357e3f4e2a4Spbrook     PhysPageDesc *pd;
35892e873b9Sbellard 
359108c49b8Sbellard     p = (void **)l1_phys_map;
360108c49b8Sbellard #if TARGET_PHYS_ADDR_SPACE_BITS > 32
361108c49b8Sbellard 
362108c49b8Sbellard #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363108c49b8Sbellard #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364108c49b8Sbellard #endif
    /* Extra top level for >32-bit physical address spaces. */
365108c49b8Sbellard     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
36692e873b9Sbellard     p = *lp;
36792e873b9Sbellard     if (!p) {
36892e873b9Sbellard         /* allocate if not found */
369108c49b8Sbellard         if (!alloc)
370108c49b8Sbellard             return NULL;
371108c49b8Sbellard         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372108c49b8Sbellard         memset(p, 0, sizeof(void *) * L1_SIZE);
373108c49b8Sbellard         *lp = p;
374108c49b8Sbellard     }
375108c49b8Sbellard #endif
376108c49b8Sbellard     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377e3f4e2a4Spbrook     pd = *lp;
378e3f4e2a4Spbrook     if (!pd) {
379e3f4e2a4Spbrook         int i;
380108c49b8Sbellard         /* allocate if not found */
381108c49b8Sbellard         if (!alloc)
382108c49b8Sbellard             return NULL;
383e3f4e2a4Spbrook         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384e3f4e2a4Spbrook         *lp = pd;
        /* New leaf: every page starts unassigned, with region_offset
           equal to its own physical address. */
38567c4d23cSpbrook         for (i = 0; i < L2_SIZE; i++) {
386e3f4e2a4Spbrook           pd[i].phys_offset = IO_MEM_UNASSIGNED;
38767c4d23cSpbrook           pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
38867c4d23cSpbrook         }
38992e873b9Sbellard     }
390e3f4e2a4Spbrook     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
39192e873b9Sbellard }
39292e873b9Sbellard 
393c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
39492e873b9Sbellard {
395108c49b8Sbellard     return phys_page_find_alloc(index, 0);
39692e873b9Sbellard }
39792e873b9Sbellard 
3989fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
399c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr);
400c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
4013a7d929eSbellard                                     target_ulong vaddr);
402c8a706feSpbrook #define mmap_lock() do { } while(0)
403c8a706feSpbrook #define mmap_unlock() do { } while(0)
4049fa3e853Sbellard #endif
405fd6ce8f6Sbellard 
4064369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
4074369415fSbellard 
4084369415fSbellard #if defined(CONFIG_USER_ONLY)
409ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
4104369415fSbellard    user mode. It will change when a dedicated libc will be used */
4114369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
4124369415fSbellard #endif
4134369415fSbellard 
4144369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
4154369415fSbellard static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
4164369415fSbellard #endif
4174369415fSbellard 
/* Allocate the translated-code buffer and the TranslationBlock array.
 * 'tb_size' is the requested buffer size in bytes; zero selects a
 * default (a fixed constant in user mode, ram_size/4 in system mode).
 * Placement is host-specific: several hosts need the buffer within
 * direct-branch range of the generated code, hence the MAP_32BIT /
 * MAP_FIXED games below.  Sets code_gen_buffer{,_size,_max_size},
 * code_gen_max_blocks and tbs, and makes buffer and prologue
 * executable. */
4188fcd3692Sblueswir1 static void code_gen_alloc(unsigned long tb_size)
41926a5f13bSbellard {
4204369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User mode: use a static buffer; only the protection changes. */
4214369415fSbellard     code_gen_buffer = static_code_gen_buffer;
4224369415fSbellard     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4234369415fSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
4244369415fSbellard #else
42526a5f13bSbellard     code_gen_buffer_size = tb_size;
42626a5f13bSbellard     if (code_gen_buffer_size == 0) {
4274369415fSbellard #if defined(CONFIG_USER_ONLY)
4284369415fSbellard         /* in user mode, phys_ram_size is not meaningful */
4294369415fSbellard         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
4304369415fSbellard #else
431ccbb4d44SStuart Brady         /* XXX: needs adjustments */
43294a6b54fSpbrook         code_gen_buffer_size = (unsigned long)(ram_size / 4);
4334369415fSbellard #endif
43426a5f13bSbellard     }
43526a5f13bSbellard     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
43626a5f13bSbellard         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
43726a5f13bSbellard     /* The code gen buffer location may have constraints depending on
43826a5f13bSbellard        the host cpu and OS */
43926a5f13bSbellard #if defined(__linux__)
44026a5f13bSbellard     {
44126a5f13bSbellard         int flags;
442141ac468Sblueswir1         void *start = NULL;
443141ac468Sblueswir1 
44426a5f13bSbellard         flags = MAP_PRIVATE | MAP_ANONYMOUS;
44526a5f13bSbellard #if defined(__x86_64__)
44626a5f13bSbellard         flags |= MAP_32BIT;
44726a5f13bSbellard         /* Cannot map more than that */
44826a5f13bSbellard         if (code_gen_buffer_size > (800 * 1024 * 1024))
44926a5f13bSbellard             code_gen_buffer_size = (800 * 1024 * 1024);
450141ac468Sblueswir1 #elif defined(__sparc_v9__)
451141ac468Sblueswir1         // Map the buffer below 2G, so we can use direct calls and branches
452141ac468Sblueswir1         flags |= MAP_FIXED;
453141ac468Sblueswir1         start = (void *) 0x60000000UL;
454141ac468Sblueswir1         if (code_gen_buffer_size > (512 * 1024 * 1024))
455141ac468Sblueswir1             code_gen_buffer_size = (512 * 1024 * 1024);
4561cb0661eSbalrog #elif defined(__arm__)
45763d41246Sbalrog         /* Map the buffer below 32M, so we can use direct calls and branches */
4581cb0661eSbalrog         flags |= MAP_FIXED;
4591cb0661eSbalrog         start = (void *) 0x01000000UL;
4601cb0661eSbalrog         if (code_gen_buffer_size > 16 * 1024 * 1024)
4611cb0661eSbalrog             code_gen_buffer_size = 16 * 1024 * 1024;
46226a5f13bSbellard #endif
463141ac468Sblueswir1         code_gen_buffer = mmap(start, code_gen_buffer_size,
46426a5f13bSbellard                                PROT_WRITE | PROT_READ | PROT_EXEC,
46526a5f13bSbellard                                flags, -1, 0);
46626a5f13bSbellard         if (code_gen_buffer == MAP_FAILED) {
46726a5f13bSbellard             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
46826a5f13bSbellard             exit(1);
46926a5f13bSbellard         }
47026a5f13bSbellard     }
471a167ba50SAurelien Jarno #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
47206e67a82Saliguori     {
47306e67a82Saliguori         int flags;
47406e67a82Saliguori         void *addr = NULL;
47506e67a82Saliguori         flags = MAP_PRIVATE | MAP_ANONYMOUS;
47606e67a82Saliguori #if defined(__x86_64__)
47706e67a82Saliguori         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
47806e67a82Saliguori          * 0x40000000 is free */
47906e67a82Saliguori         flags |= MAP_FIXED;
48006e67a82Saliguori         addr = (void *)0x40000000;
48106e67a82Saliguori         /* Cannot map more than that */
48206e67a82Saliguori         if (code_gen_buffer_size > (800 * 1024 * 1024))
48306e67a82Saliguori             code_gen_buffer_size = (800 * 1024 * 1024);
48406e67a82Saliguori #endif
48506e67a82Saliguori         code_gen_buffer = mmap(addr, code_gen_buffer_size,
48606e67a82Saliguori                                PROT_WRITE | PROT_READ | PROT_EXEC,
48706e67a82Saliguori                                flags, -1, 0);
48806e67a82Saliguori         if (code_gen_buffer == MAP_FAILED) {
48906e67a82Saliguori             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
49006e67a82Saliguori             exit(1);
49106e67a82Saliguori         }
49206e67a82Saliguori     }
49326a5f13bSbellard #else
49426a5f13bSbellard     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
49526a5f13bSbellard     map_exec(code_gen_buffer, code_gen_buffer_size);
49626a5f13bSbellard #endif
4974369415fSbellard #endif /* !USE_STATIC_CODE_GEN_BUFFER */
49826a5f13bSbellard     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Reserve room for one maximal TB at the end so a block being
       generated cannot overrun the buffer. */
49926a5f13bSbellard     code_gen_buffer_max_size = code_gen_buffer_size -
50026a5f13bSbellard         code_gen_max_block_size();
50126a5f13bSbellard     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
50226a5f13bSbellard     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
50326a5f13bSbellard }
50426a5f13bSbellard 
50526a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
50626a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
50726a5f13bSbellard    size. */
50826a5f13bSbellard void cpu_exec_init_all(unsigned long tb_size)
50926a5f13bSbellard {
51026a5f13bSbellard     cpu_gen_init();
51126a5f13bSbellard     code_gen_alloc(tb_size);
51226a5f13bSbellard     code_gen_ptr = code_gen_buffer;
5134369415fSbellard     page_init();
514e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
51526a5f13bSbellard     io_mem_init();
516e2eef170Spbrook #endif
51726a5f13bSbellard }
51826a5f13bSbellard 
5199656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5209656f324Spbrook 
521d4bfa4d7SJuan Quintela static void cpu_common_pre_save(void *opaque)
5229656f324Spbrook {
523d4bfa4d7SJuan Quintela     CPUState *env = opaque;
5249656f324Spbrook 
5254c0960c0SAvi Kivity     cpu_synchronize_state(env);
5269656f324Spbrook }
5279656f324Spbrook 
528e7f4eff7SJuan Quintela static int cpu_common_pre_load(void *opaque)
5299656f324Spbrook {
5309656f324Spbrook     CPUState *env = opaque;
5319656f324Spbrook 
5324c0960c0SAvi Kivity     cpu_synchronize_state(env);
533e7f4eff7SJuan Quintela     return 0;
534e7f4eff7SJuan Quintela }
5359656f324Spbrook 
536e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
537e7f4eff7SJuan Quintela {
538e7f4eff7SJuan Quintela     CPUState *env = opaque;
539e7f4eff7SJuan Quintela 
5403098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
5413098dba0Saurel32        version_id is increased. */
5423098dba0Saurel32     env->interrupt_request &= ~0x01;
5439656f324Spbrook     tlb_flush(env, 1);
5449656f324Spbrook 
5459656f324Spbrook     return 0;
5469656f324Spbrook }
547e7f4eff7SJuan Quintela 
/* Migration/snapshot description of the architecture-independent part
 * of CPUState: only 'halted' and 'interrupt_request' are transferred;
 * the hooks above keep the accelerator state coherent around the
 * save/load. */
548e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = {
549e7f4eff7SJuan Quintela     .name = "cpu_common",
550e7f4eff7SJuan Quintela     .version_id = 1,
551e7f4eff7SJuan Quintela     .minimum_version_id = 1,
552e7f4eff7SJuan Quintela     .minimum_version_id_old = 1,
553e7f4eff7SJuan Quintela     .pre_save = cpu_common_pre_save,
554e7f4eff7SJuan Quintela     .pre_load = cpu_common_pre_load,
555e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
556e7f4eff7SJuan Quintela     .fields      = (VMStateField []) {
557e7f4eff7SJuan Quintela         VMSTATE_UINT32(halted, CPUState),
558e7f4eff7SJuan Quintela         VMSTATE_UINT32(interrupt_request, CPUState),
559e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
560e7f4eff7SJuan Quintela     }
561e7f4eff7SJuan Quintela };
5629656f324Spbrook #endif
5639656f324Spbrook 
564950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu)
565950f1472SGlauber Costa {
566950f1472SGlauber Costa     CPUState *env = first_cpu;
567950f1472SGlauber Costa 
568950f1472SGlauber Costa     while (env) {
569950f1472SGlauber Costa         if (env->cpu_index == cpu)
570950f1472SGlauber Costa             break;
571950f1472SGlauber Costa         env = env->next_cpu;
572950f1472SGlauber Costa     }
573950f1472SGlauber Costa 
574950f1472SGlauber Costa     return env;
575950f1472SGlauber Costa }
576950f1472SGlauber Costa 
/* Register a freshly created CPU: append it to the global first_cpu
 * list, assign it the next sequential cpu_index, initialize its
 * breakpoint/watchpoint queues, and (system mode with savevm support)
 * register its migration state.  User mode takes the CPU list lock
 * around the list manipulation. */
5776a00d601Sbellard void cpu_exec_init(CPUState *env)
578fd6ce8f6Sbellard {
5796a00d601Sbellard     CPUState **penv;
5806a00d601Sbellard     int cpu_index;
5816a00d601Sbellard 
582c2764719Spbrook #if defined(CONFIG_USER_ONLY)
583c2764719Spbrook     cpu_list_lock();
584c2764719Spbrook #endif
5856a00d601Sbellard     env->next_cpu = NULL;
5866a00d601Sbellard     penv = &first_cpu;
5876a00d601Sbellard     cpu_index = 0;
    /* Walk to the list tail; cpu_index counts existing CPUs, so the
       new CPU gets the next free index. */
5886a00d601Sbellard     while (*penv != NULL) {
5891e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
5906a00d601Sbellard         cpu_index++;
5916a00d601Sbellard     }
5926a00d601Sbellard     env->cpu_index = cpu_index;
593268a362cSaliguori     env->numa_node = 0;
59472cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
59572cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
5966a00d601Sbellard     *penv = env;
597c2764719Spbrook #if defined(CONFIG_USER_ONLY)
598c2764719Spbrook     cpu_list_unlock();
599c2764719Spbrook #endif
600b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601e7f4eff7SJuan Quintela     vmstate_register(cpu_index, &vmstate_cpu_common, env);
602b3c7724cSpbrook     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
603b3c7724cSpbrook                     cpu_save, cpu_load, env);
604b3c7724cSpbrook #endif
605fd6ce8f6Sbellard }
606fd6ce8f6Sbellard 
6079fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
6089fa3e853Sbellard {
6099fa3e853Sbellard     if (p->code_bitmap) {
61059817ccbSbellard         qemu_free(p->code_bitmap);
6119fa3e853Sbellard         p->code_bitmap = NULL;
6129fa3e853Sbellard     }
6139fa3e853Sbellard     p->code_write_count = 0;
6149fa3e853Sbellard }
6159fa3e853Sbellard 
616fd6ce8f6Sbellard /* set to NULL all the 'first_tb' fields in all PageDescs */
617fd6ce8f6Sbellard static void page_flush_tb(void)
618fd6ce8f6Sbellard {
619fd6ce8f6Sbellard     int i, j;
620fd6ce8f6Sbellard     PageDesc *p;
621fd6ce8f6Sbellard 
622fd6ce8f6Sbellard     for(i = 0; i < L1_SIZE; i++) {
623fd6ce8f6Sbellard         p = l1_map[i];
624fd6ce8f6Sbellard         if (p) {
6259fa3e853Sbellard             for(j = 0; j < L2_SIZE; j++) {
6269fa3e853Sbellard                 p->first_tb = NULL;
6279fa3e853Sbellard                 invalidate_page_bitmap(p);
6289fa3e853Sbellard                 p++;
6299fa3e853Sbellard             }
630fd6ce8f6Sbellard         }
631fd6ce8f6Sbellard     }
632fd6ce8f6Sbellard }
633fd6ce8f6Sbellard 
634fd6ce8f6Sbellard /* flush all the translation blocks */
635d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
/* Discard every translated block: reset the TB count and code buffer
 * pointer, clear each CPU's jump cache and the physical hash table,
 * and drop per-page TB links.  'env1' is only used for error
 * reporting via cpu_abort. */
6366a00d601Sbellard void tb_flush(CPUState *env1)
637fd6ce8f6Sbellard {
6386a00d601Sbellard     CPUState *env;
6390124311eSbellard #if defined(DEBUG_FLUSH)
640ab3d1727Sblueswir1     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
641ab3d1727Sblueswir1            (unsigned long)(code_gen_ptr - code_gen_buffer),
642ab3d1727Sblueswir1            nb_tbs, nb_tbs > 0 ?
643ab3d1727Sblueswir1            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
644fd6ce8f6Sbellard #endif
    /* Sanity check: generated code must never have run past the
       buffer end. */
64526a5f13bSbellard     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
646a208e54aSpbrook         cpu_abort(env1, "Internal error: code buffer overflow\n");
647a208e54aSpbrook 
648fd6ce8f6Sbellard     nb_tbs = 0;
6496a00d601Sbellard 
    /* Every CPU caches TB pointers in tb_jmp_cache; all become stale. */
6506a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
6518a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
6526a00d601Sbellard     }
6539fa3e853Sbellard 
6548a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
655fd6ce8f6Sbellard     page_flush_tb();
6569fa3e853Sbellard 
657fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
658d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
659d4e8164fSbellard        expensive */
660e3db7226Sbellard     tb_flush_count++;
661fd6ce8f6Sbellard }
662fd6ce8f6Sbellard 
663fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
664fd6ce8f6Sbellard 
665bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
666fd6ce8f6Sbellard {
667fd6ce8f6Sbellard     TranslationBlock *tb;
668fd6ce8f6Sbellard     int i;
669fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
67099773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
67199773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
672fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
673fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
6740bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
6750bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
67699773bd4Spbrook                        address, (long)tb->pc, tb->size);
677fd6ce8f6Sbellard             }
678fd6ce8f6Sbellard         }
679fd6ce8f6Sbellard     }
680fd6ce8f6Sbellard }
681fd6ce8f6Sbellard 
682fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
683fd6ce8f6Sbellard static void tb_page_check(void)
684fd6ce8f6Sbellard {
685fd6ce8f6Sbellard     TranslationBlock *tb;
686fd6ce8f6Sbellard     int i, flags1, flags2;
687fd6ce8f6Sbellard 
68899773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
68999773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
690fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
691fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
692fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
693fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
69499773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
695fd6ce8f6Sbellard             }
696fd6ce8f6Sbellard         }
697fd6ce8f6Sbellard     }
698fd6ce8f6Sbellard }
699fd6ce8f6Sbellard 
700fd6ce8f6Sbellard #endif
701fd6ce8f6Sbellard 
702fd6ce8f6Sbellard /* invalidate one TB */
703fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
704fd6ce8f6Sbellard                              int next_offset)
705fd6ce8f6Sbellard {
706fd6ce8f6Sbellard     TranslationBlock *tb1;
707fd6ce8f6Sbellard     for(;;) {
708fd6ce8f6Sbellard         tb1 = *ptb;
709fd6ce8f6Sbellard         if (tb1 == tb) {
710fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
711fd6ce8f6Sbellard             break;
712fd6ce8f6Sbellard         }
713fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
714fd6ce8f6Sbellard     }
715fd6ce8f6Sbellard }
716fd6ce8f6Sbellard 
/* Unlink 'tb' from a per-page TB list.  List pointers are tagged: the
   low 2 bits of each entry encode which of the TB's (up to two) pages
   this link belongs to, so the matching page_next[] slot is followed.
   The TB is assumed to be present in the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* split the tagged pointer into page index and real TB pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            /* found it: splice this entry out of the list */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
7339fa3e853Sbellard 
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that jump
   into the same target.  Entries are tagged pointers: low bits 0/1 name
   the jmp_next[] slot of the pointed-to TB, and tag 2 marks the list
   head (the target TB's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: this entry is the list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
761d4e8164fSbellard 
762d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
763d4e8164fSbellard    another TB */
764d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
765d4e8164fSbellard {
766d4e8164fSbellard     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
767d4e8164fSbellard }
768d4e8164fSbellard 
/* Remove a TB from every data structure that references it: the
   physical-pc hash table, the per-page TB lists, the per-CPU jump
   caches and both directions of the TB-chaining lists.  'page_addr'
   names a page whose list is being torn down by the caller (pass -1
   when no page list should be skipped). */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list(s); a page equal to 'page_addr'
       is skipped because the caller is already iterating its list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* evict the TB from each CPU's virtual-pc jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the tagged circular
       list rooted at jmp_first (tag 2 marks the head) and unpatch each
       TB that chained into this one */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
8249fa3e853Sbellard 
/* Set bits [start, start+len) in the bitmap 'tab' (bit i lives in
   byte i>>3 at position i&7).  A zero-length range is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *q = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies inside a single byte */
        if (start < end)
            *q |= head_mask & ~(0xff << (end & 7));
        return;
    }

    /* partial leading byte */
    *q++ |= head_mask;

    /* full bytes in the middle */
    {
        int full = (end >> 3) - ((start >> 3) + 1);
        while (full-- > 0)
            *q++ = 0xff;
    }

    /* partial trailing byte, unless the range ends on a byte boundary */
    if (end & 7)
        *q |= ~(0xff << (end & 7)) & 0xff;
}
8519fa3e853Sbellard 
/* Build the per-page code bitmap: one bit per byte of the page, set
   where translated code exists.  Used to let most writes to a page with
   code take the fast path (see tb_invalidate_phys_page_fast). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the target page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer tell which of the TB's pages
           this entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: code starts at offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
8799fa3e853Sbellard 
/* Translate one block starting at guest pc and link it into the
   physical page tables.  If TB allocation fails, the whole translation
   cache is flushed and the allocation retried (which then cannot fail).
   Returns the newly generated TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    /* generated host code goes at the current end of the code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second guest page: record it too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
916d720b93dSbellard 
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* state used to restart execution if the currently running TB
       writes into its own code (precise self-modifying code support) */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on a code page, build the per-byte code
       bitmap so later small writes can take the fast path */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list entry select which of the TB's pages
           this list belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
10279fa3e853Sbellard 
10289fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
1029c227f099SAnthony Liguori static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
10309fa3e853Sbellard {
10319fa3e853Sbellard     PageDesc *p;
10329fa3e853Sbellard     int offset, b;
103359817ccbSbellard #if 0
1034a4193c8aSbellard     if (1) {
103593fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
10362e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1037a4193c8aSbellard                   cpu_single_env->eip,
1038a4193c8aSbellard                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1039a4193c8aSbellard     }
104059817ccbSbellard #endif
10419fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
10429fa3e853Sbellard     if (!p)
10439fa3e853Sbellard         return;
10449fa3e853Sbellard     if (p->code_bitmap) {
10459fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
10469fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
10479fa3e853Sbellard         if (b & ((1 << len) - 1))
10489fa3e853Sbellard             goto do_invalidate;
10499fa3e853Sbellard     } else {
10509fa3e853Sbellard     do_invalidate:
1051d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
10529fa3e853Sbellard     }
10539fa3e853Sbellard }
10549fa3e853Sbellard 
10559fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode slow
   path, typically reached from a write-protection fault).  'pc'/'puc'
   describe the faulting write so that, with precise SMC support, the
   current TB can be restarted if it modified its own code. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* identify the TB that contains the faulting write, if any */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list entry select the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
11149fa3e853Sbellard #endif
1115fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push onto the page's TB list; the page slot index 'n' is encoded
       in the low bits of the tagged list pointer */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate the
           protection of all of them and clear their PAGE_WRITE flag */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1170fd6ce8f6Sbellard 
1171fd6ce8f6Sbellard /* Allocate a new translation block. Flush the translation buffer if
1172fd6ce8f6Sbellard    too many translation blocks or too much generated code. */
1173c27004ecSbellard TranslationBlock *tb_alloc(target_ulong pc)
1174fd6ce8f6Sbellard {
1175fd6ce8f6Sbellard     TranslationBlock *tb;
1176fd6ce8f6Sbellard 
117726a5f13bSbellard     if (nb_tbs >= code_gen_max_blocks ||
117826a5f13bSbellard         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179d4e8164fSbellard         return NULL;
1180fd6ce8f6Sbellard     tb = &tbs[nb_tbs++];
1181fd6ce8f6Sbellard     tb->pc = pc;
1182b448f2f3Sbellard     tb->cflags = 0;
1183d4e8164fSbellard     return tb;
1184d4e8164fSbellard }
1185d4e8164fSbellard 
11862e70f6efSpbrook void tb_free(TranslationBlock *tb)
11872e70f6efSpbrook {
1188bf20dc07Sths     /* In practice this is mostly used for single use temporary TB
11892e70f6efSpbrook        Ignore the hard cases and just back up if this TB happens to
11902e70f6efSpbrook        be the last one generated.  */
11912e70f6efSpbrook     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
11922e70f6efSpbrook         code_gen_ptr = tb->tc_ptr;
11932e70f6efSpbrook         nb_tbs--;
11942e70f6efSpbrook     }
11952e70f6efSpbrook }
11962e70f6efSpbrook 
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 marks the head of the circular jump list (no incoming
       chained jumps yet) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means the slot is unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1236fd6ce8f6Sbellard 
1237a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
1239a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1240a513fe19Sbellard {
1241a513fe19Sbellard     int m_min, m_max, m;
1242a513fe19Sbellard     unsigned long v;
1243a513fe19Sbellard     TranslationBlock *tb;
1244a513fe19Sbellard 
1245a513fe19Sbellard     if (nb_tbs <= 0)
1246a513fe19Sbellard         return NULL;
1247a513fe19Sbellard     if (tc_ptr < (unsigned long)code_gen_buffer ||
1248a513fe19Sbellard         tc_ptr >= (unsigned long)code_gen_ptr)
1249a513fe19Sbellard         return NULL;
1250a513fe19Sbellard     /* binary search (cf Knuth) */
1251a513fe19Sbellard     m_min = 0;
1252a513fe19Sbellard     m_max = nb_tbs - 1;
1253a513fe19Sbellard     while (m_min <= m_max) {
1254a513fe19Sbellard         m = (m_min + m_max) >> 1;
1255a513fe19Sbellard         tb = &tbs[m];
1256a513fe19Sbellard         v = (unsigned long)tb->tc_ptr;
1257a513fe19Sbellard         if (v == tc_ptr)
1258a513fe19Sbellard             return tb;
1259a513fe19Sbellard         else if (tc_ptr < v) {
1260a513fe19Sbellard             m_max = m - 1;
1261a513fe19Sbellard         } else {
1262a513fe19Sbellard             m_min = m + 1;
1263a513fe19Sbellard         }
1264a513fe19Sbellard     }
1265a513fe19Sbellard     return &tbs[m_max];
1266a513fe19Sbellard }
12677501267eSbellard 
1268ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1269ea041c0eSbellard 
/* Break the chained jump 'n' of 'tb': find the TB it jumps to, remove
   'tb' from that target's circular incoming-jump list (tagged pointers,
   tag 2 = list head), unpatch the generated jump, then recurse into the
   target so its own outgoing chains are reset as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1308ea041c0eSbellard 
1309ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1310ea041c0eSbellard {
1311ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1312ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1313ea041c0eSbellard }
1314ea041c0eSbellard 
13151fddef4bSbellard #if defined(TARGET_HAS_ICE)
/* Invalidate the translated code covering guest pc so that the next
   execution retranslates it (and re-inserts/removes the breakpoint). */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate the virtual pc to a physical address for this CPU */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the sub-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    /* invalidate just the single byte at pc */
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1333c27004ecSbellard #endif
1334d720b93dSbellard 
13356658ffb8Spbrook /* Add a watchpoint.  */
1336a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
13386658ffb8Spbrook {
1339b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1340c0ce998eSaliguori     CPUWatchpoint *wp;
13416658ffb8Spbrook 
1342b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343b4051334Saliguori     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346b4051334Saliguori         return -EINVAL;
1347b4051334Saliguori     }
1348a1d1bb31Saliguori     wp = qemu_malloc(sizeof(*wp));
13496658ffb8Spbrook 
1350a1d1bb31Saliguori     wp->vaddr = addr;
1351b4051334Saliguori     wp->len_mask = len_mask;
1352a1d1bb31Saliguori     wp->flags = flags;
1353a1d1bb31Saliguori 
13542dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1355c0ce998eSaliguori     if (flags & BP_GDB)
135672cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357c0ce998eSaliguori     else
135872cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1359a1d1bb31Saliguori 
13606658ffb8Spbrook     tlb_flush_page(env, addr);
1361a1d1bb31Saliguori 
1362a1d1bb31Saliguori     if (watchpoint)
1363a1d1bb31Saliguori         *watchpoint = wp;
1364a1d1bb31Saliguori     return 0;
13656658ffb8Spbrook }
13666658ffb8Spbrook 
1367a1d1bb31Saliguori /* Remove a specific watchpoint.  */
1368a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369a1d1bb31Saliguori                           int flags)
13706658ffb8Spbrook {
1371b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1372a1d1bb31Saliguori     CPUWatchpoint *wp;
13736658ffb8Spbrook 
137472cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
13766e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
13786658ffb8Spbrook             return 0;
13796658ffb8Spbrook         }
13806658ffb8Spbrook     }
1381a1d1bb31Saliguori     return -ENOENT;
13826658ffb8Spbrook }
13836658ffb8Spbrook 
1384a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
1385a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1386a1d1bb31Saliguori {
138772cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
13887d03f82fSedgar_igl 
1389a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1390a1d1bb31Saliguori 
1391a1d1bb31Saliguori     qemu_free(watchpoint);
13927d03f82fSedgar_igl }
13937d03f82fSedgar_igl 
1394a1d1bb31Saliguori /* Remove all matching watchpoints.  */
1395a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask)
1396a1d1bb31Saliguori {
1397c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1398a1d1bb31Saliguori 
139972cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400a1d1bb31Saliguori         if (wp->flags & mask)
1401a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1402a1d1bb31Saliguori     }
1403c0ce998eSaliguori }
1404a1d1bb31Saliguori 
1405a1d1bb31Saliguori /* Add a breakpoint.  */
1406a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
14084c3a88a2Sbellard {
14091fddef4bSbellard #if defined(TARGET_HAS_ICE)
1410c0ce998eSaliguori     CPUBreakpoint *bp;
14114c3a88a2Sbellard 
1412a1d1bb31Saliguori     bp = qemu_malloc(sizeof(*bp));
14134c3a88a2Sbellard 
1414a1d1bb31Saliguori     bp->pc = pc;
1415a1d1bb31Saliguori     bp->flags = flags;
1416a1d1bb31Saliguori 
14172dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1418c0ce998eSaliguori     if (flags & BP_GDB)
141972cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420c0ce998eSaliguori     else
142172cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1422d720b93dSbellard 
1423d720b93dSbellard     breakpoint_invalidate(env, pc);
1424a1d1bb31Saliguori 
1425a1d1bb31Saliguori     if (breakpoint)
1426a1d1bb31Saliguori         *breakpoint = bp;
14274c3a88a2Sbellard     return 0;
14284c3a88a2Sbellard #else
1429a1d1bb31Saliguori     return -ENOSYS;
14304c3a88a2Sbellard #endif
14314c3a88a2Sbellard }
14324c3a88a2Sbellard 
1433a1d1bb31Saliguori /* Remove a specific breakpoint.  */
1434a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1435a1d1bb31Saliguori {
14367d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1437a1d1bb31Saliguori     CPUBreakpoint *bp;
1438a1d1bb31Saliguori 
143972cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1441a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1442a1d1bb31Saliguori             return 0;
14437d03f82fSedgar_igl         }
1444a1d1bb31Saliguori     }
1445a1d1bb31Saliguori     return -ENOENT;
1446a1d1bb31Saliguori #else
1447a1d1bb31Saliguori     return -ENOSYS;
14487d03f82fSedgar_igl #endif
14497d03f82fSedgar_igl }
14507d03f82fSedgar_igl 
1451a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
1452a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
14534c3a88a2Sbellard {
14541fddef4bSbellard #if defined(TARGET_HAS_ICE)
145572cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1456d720b93dSbellard 
1457a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1458a1d1bb31Saliguori 
1459a1d1bb31Saliguori     qemu_free(breakpoint);
1460a1d1bb31Saliguori #endif
1461a1d1bb31Saliguori }
1462a1d1bb31Saliguori 
1463a1d1bb31Saliguori /* Remove all matching breakpoints. */
1464a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask)
1465a1d1bb31Saliguori {
1466a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1467c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1468a1d1bb31Saliguori 
146972cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470a1d1bb31Saliguori         if (bp->flags & mask)
1471a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1472c0ce998eSaliguori     }
14734c3a88a2Sbellard #endif
14744c3a88a2Sbellard }
14754c3a88a2Sbellard 
1476c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1477c33a346eSbellard    CPU loop after each instruction */
1478c33a346eSbellard void cpu_single_step(CPUState *env, int enabled)
1479c33a346eSbellard {
14801fddef4bSbellard #if defined(TARGET_HAS_ICE)
1481c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1482c33a346eSbellard         env->singlestep_enabled = enabled;
1483e22a25c9Saliguori         if (kvm_enabled())
1484e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1485e22a25c9Saliguori         else {
1486ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
14879fa3e853Sbellard             /* XXX: only flush what is necessary */
14880124311eSbellard             tb_flush(env);
1489c33a346eSbellard         }
1490e22a25c9Saliguori     }
1491c33a346eSbellard #endif
1492c33a346eSbellard }
1493c33a346eSbellard 
/* enable or disable low levels log: opens the log file on first enable,
   closes it when all log flags are cleared */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* first open truncates; subsequent sessions append */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* from now on, reopen in append mode to preserve earlier output */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
152134865134Sbellard 
/* Change the log file name; any open log is closed and reopened at the
   new location with the current loglevel. */
void cpu_set_log_filename(const char *filename)
{
    /* NOTE(review): the previous logfilename is not freed; it may point
       at a string-literal default, so freeing blindly would be unsafe.
       The small leak on repeated calls appears deliberate. */
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* reopen (if logging is enabled) under the new name */
    cpu_set_log(loglevel);
}
1531c33a346eSbellard 
/* Force the CPU out of its currently executing translation block by
   unchaining it (and everything it links to), so the execution loop
   regains control after the current TB finishes. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* serialize concurrent unlink attempts on the jump lists */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
15513098dba0Saurel32 
/* mask must never be zero, except for A20 change call.
   Raise the given interrupt request bits on 'env' and make sure the CPU
   notices: either by kicking a halted vcpu (iothread case), expiring the
   icount budget, or unchaining the current TB. */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* remember which bits are newly raised (checked below under icount) */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* make the instruction counter expire immediately */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a NEW interrupt outside an I/O instruction breaks
           deterministic icount replay */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1583ea041c0eSbellard 
/* Clear the given interrupt request bits on 'env'. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1588b54ad049Sbellard 
/* Request that the CPU leave its execution loop as soon as possible:
   set the exit flag and unchain the TB it may be running. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
15943098dba0Saurel32 
/* Table of recognised log masks: {mask, command-line name, help text}.
   Terminated by a {0, NULL, NULL} sentinel (relied on by the parser). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1626f193c797Sbellard 
/* Return nonzero iff the NUL-terminated string s2 has length exactly n
   and matches the first n characters of s1. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1633f193c797Sbellard 
1634f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. */
1635f193c797Sbellard int cpu_str_to_log_mask(const char *str)
1636f193c797Sbellard {
1637c7cd6a37Sblueswir1     const CPULogItem *item;
1638f193c797Sbellard     int mask;
1639f193c797Sbellard     const char *p, *p1;
1640f193c797Sbellard 
1641f193c797Sbellard     p = str;
1642f193c797Sbellard     mask = 0;
1643f193c797Sbellard     for(;;) {
1644f193c797Sbellard         p1 = strchr(p, ',');
1645f193c797Sbellard         if (!p1)
1646f193c797Sbellard             p1 = p + strlen(p);
16478e3a9fd2Sbellard 	if(cmp1(p,p1-p,"all")) {
16488e3a9fd2Sbellard 		for(item = cpu_log_items; item->mask != 0; item++) {
16498e3a9fd2Sbellard 			mask |= item->mask;
16508e3a9fd2Sbellard 		}
16518e3a9fd2Sbellard 	} else {
1652f193c797Sbellard         for(item = cpu_log_items; item->mask != 0; item++) {
1653f193c797Sbellard             if (cmp1(p, p1 - p, item->name))
1654f193c797Sbellard                 goto found;
1655f193c797Sbellard         }
1656f193c797Sbellard         return 0;
16578e3a9fd2Sbellard 	}
1658f193c797Sbellard     found:
1659f193c797Sbellard         mask |= item->mask;
1660f193c797Sbellard         if (*p1 != ',')
1661f193c797Sbellard             break;
1662f193c797Sbellard         p = p1 + 1;
1663f193c797Sbellard     }
1664f193c797Sbellard     return mask;
1665f193c797Sbellard }
1666ea041c0eSbellard 
/* Report a fatal emulation error (printf-style message) on stderr and in
   the qemu log, dump the CPU state, and abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* a va_list may only be consumed once; keep a copy for the log */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() below actually
           terminates the process instead of re-entering guest handlers */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
17067501267eSbellard 
1707c5be9f08Sths CPUState *cpu_copy(CPUState *env)
1708c5be9f08Sths {
170901ba9816Sths     CPUState *new_env = cpu_init(env->cpu_model_str);
1710c5be9f08Sths     CPUState *next_cpu = new_env->next_cpu;
1711c5be9f08Sths     int cpu_index = new_env->cpu_index;
17125a38f081Saliguori #if defined(TARGET_HAS_ICE)
17135a38f081Saliguori     CPUBreakpoint *bp;
17145a38f081Saliguori     CPUWatchpoint *wp;
17155a38f081Saliguori #endif
17165a38f081Saliguori 
1717c5be9f08Sths     memcpy(new_env, env, sizeof(CPUState));
17185a38f081Saliguori 
17195a38f081Saliguori     /* Preserve chaining and index. */
1720c5be9f08Sths     new_env->next_cpu = next_cpu;
1721c5be9f08Sths     new_env->cpu_index = cpu_index;
17225a38f081Saliguori 
17235a38f081Saliguori     /* Clone all break/watchpoints.
17245a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
17255a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
172672cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
172772cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
17285a38f081Saliguori #if defined(TARGET_HAS_ICE)
172972cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
17305a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
17315a38f081Saliguori     }
173272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
17335a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
17345a38f081Saliguori                               wp->flags, NULL);
17355a38f081Saliguori     }
17365a38f081Saliguori #endif
17375a38f081Saliguori 
1738c5be9f08Sths     return new_env;
1739c5be9f08Sths }
1740c5be9f08Sths 
17410124311eSbellard #if !defined(CONFIG_USER_ONLY)
17420124311eSbellard 
17435c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
17445c751e99Sedgar_igl {
17455c751e99Sedgar_igl     unsigned int i;
17465c751e99Sedgar_igl 
17475c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
17485c751e99Sedgar_igl        overlap the flushed page.  */
17495c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
17505c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
17515c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
17525c751e99Sedgar_igl 
17535c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
17545c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
17555c751e99Sedgar_igl 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
17565c751e99Sedgar_igl }
17575c751e99Sedgar_igl 
/* The canonical "invalid" TLB entry: all address fields set to -1 so no
   lookup can match; used to reset entries on flush. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
176408738984SIgor Kovalenko 
1765ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not
1766ee8b7021Sbellard    implemented yet) */
1767ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global)
176833417e70Sbellard {
176933417e70Sbellard     int i;
17700124311eSbellard 
17719fa3e853Sbellard #if defined(DEBUG_TLB)
17729fa3e853Sbellard     printf("tlb_flush:\n");
17739fa3e853Sbellard #endif
17740124311eSbellard     /* must reset current TB so that interrupts cannot modify the
17750124311eSbellard        links while we are modifying them */
17760124311eSbellard     env->current_tb = NULL;
17770124311eSbellard 
177833417e70Sbellard     for(i = 0; i < CPU_TLB_SIZE; i++) {
1779cfde4bd9SIsaku Yamahata         int mmu_idx;
1780cfde4bd9SIsaku Yamahata         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
178108738984SIgor Kovalenko             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1782cfde4bd9SIsaku Yamahata         }
178333417e70Sbellard     }
17849fa3e853Sbellard 
17858a40a180Sbellard     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
17869fa3e853Sbellard 
1787e3db7226Sbellard     tlb_flush_count++;
178833417e70Sbellard }
178933417e70Sbellard 
1790274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
179161382a50Sbellard {
179284b7b8e7Sbellard     if (addr == (tlb_entry->addr_read &
179384b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
179484b7b8e7Sbellard         addr == (tlb_entry->addr_write &
179584b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
179684b7b8e7Sbellard         addr == (tlb_entry->addr_code &
179784b7b8e7Sbellard                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
179808738984SIgor Kovalenko         *tlb_entry = s_cputlb_empty_entry;
179984b7b8e7Sbellard     }
180061382a50Sbellard }
180161382a50Sbellard 
18022e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr)
180333417e70Sbellard {
18048a40a180Sbellard     int i;
1805cfde4bd9SIsaku Yamahata     int mmu_idx;
18060124311eSbellard 
18079fa3e853Sbellard #if defined(DEBUG_TLB)
1808108c49b8Sbellard     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
18099fa3e853Sbellard #endif
18100124311eSbellard     /* must reset current TB so that interrupts cannot modify the
18110124311eSbellard        links while we are modifying them */
18120124311eSbellard     env->current_tb = NULL;
181333417e70Sbellard 
181461382a50Sbellard     addr &= TARGET_PAGE_MASK;
181533417e70Sbellard     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1816cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1817cfde4bd9SIsaku Yamahata         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
18180124311eSbellard 
18195c751e99Sedgar_igl     tlb_flush_jmp_cache(env, addr);
18209fa3e853Sbellard }
18219fa3e853Sbellard 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected: clearing CODE_DIRTY_FLAG makes subsequent writes to
   the page trap so self-modifying code can be handled */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
18309fa3e853Sbellard 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code: mark the page's code-dirty bit again.
   'env' and 'vaddr' are unused here; kept for the caller's interface. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
18389fa3e853Sbellard 
/* If this TLB entry maps RAM whose host address falls inside
   [start, start+length), mark it TLB_NOTDIRTY so the next write goes
   through the slow path and sets the dirty bits again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    /* only plain RAM mappings participate in dirty tracking */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* reconstruct the host address this entry writes to */
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned subtraction doubles as a range check */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
18501ccde1cbSbellard 
/* Note: start and end must be within the same ram block.
   Clear the given dirty flag bits for the RAM range [start, end) and
   force every CPU's TLB to re-fault on writes there so the bits get
   set again on the next access. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    /* round the range out to whole pages */
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    /* clear the requested flag bits in the per-page dirty byte array */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
18911ccde1cbSbellard 
189274576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
189374576198Saliguori {
189474576198Saliguori     in_migration = enable;
1895b0a46a33SJan Kiszka     if (kvm_enabled()) {
1896b0a46a33SJan Kiszka         return kvm_set_migration_log(enable);
1897b0a46a33SJan Kiszka     }
189874576198Saliguori     return 0;
189974576198Saliguori }
190074576198Saliguori 
/* Return nonzero if migration dirty-page tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
190574576198Saliguori 
1906c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1907c227f099SAnthony Liguori                                    target_phys_addr_t end_addr)
19082bec46dcSaliguori {
1909151f7749SJan Kiszka     int ret = 0;
1910151f7749SJan Kiszka 
19112bec46dcSaliguori     if (kvm_enabled())
1912151f7749SJan Kiszka         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1913151f7749SJan Kiszka     return ret;
19142bec46dcSaliguori }
19152bec46dcSaliguori 
/* Re-derive the TLB_NOTDIRTY marking of a RAM-mapped TLB entry from the
   current global dirty-bit state for its page. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    /* only plain RAM mappings carry dirty tracking */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the host pointer, then the ram offset, for this entry */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            /* page not dirty: writes must go through the slow path */
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
19303a7d929eSbellard 
19313a7d929eSbellard /* update the TLB according to the current state of the dirty bits */
19323a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env)
19333a7d929eSbellard {
19343a7d929eSbellard     int i;
1935cfde4bd9SIsaku Yamahata     int mmu_idx;
1936cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
19373a7d929eSbellard         for(i = 0; i < CPU_TLB_SIZE; i++)
1938cfde4bd9SIsaku Yamahata             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1939cfde4bd9SIsaku Yamahata     }
19403a7d929eSbellard }
19413a7d929eSbellard 
19420f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
19431ccde1cbSbellard {
19440f459d16Spbrook     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
19450f459d16Spbrook         tlb_entry->addr_write = vaddr;
19461ccde1cbSbellard }
19471ccde1cbSbellard 
19480f459d16Spbrook /* update the TLB corresponding to virtual page vaddr
19490f459d16Spbrook    so that it is no longer dirty */
19500f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
19511ccde1cbSbellard {
19521ccde1cbSbellard     int i;
1953cfde4bd9SIsaku Yamahata     int mmu_idx;
19541ccde1cbSbellard 
19550f459d16Spbrook     vaddr &= TARGET_PAGE_MASK;
19561ccde1cbSbellard     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1957cfde4bd9SIsaku Yamahata     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1958cfde4bd9SIsaku Yamahata         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
19591ccde1cbSbellard }
19601ccde1cbSbellard 
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* Physical pages never registered are treated as unassigned I/O.  */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    /* addend maps the guest virtual page to its host backing pointer;
       it is only meaningful for RAM/ROM pages.  */
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the TLB slot.  iotlb and addend are stored as offsets
       relative to vaddr so the fast path can add them directly.  */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write so dirty tracking
               and TB invalidation can run.  */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
20639fa3e853Sbellard 
20640124311eSbellard #else
20650124311eSbellard 
/* CONFIG_USER_ONLY: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
20690124311eSbellard 
/* CONFIG_USER_ONLY: no softmmu TLB, so per-page flushing is a no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
20730124311eSbellard 
/* CONFIG_USER_ONLY: no TLB to populate; always report success. */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
208033417e70Sbellard 
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 * 'fn' receives (priv, start, end, prot); a non-zero return from 'fn'
 * stops the walk and is propagated to the caller.  Returns 0 when the
 * whole address space was walked.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    /* start == -1 means "no region currently open".  */
    start = end = -1;
    prot = 0;

    /* Iterate one slot past L1_SIZE with a NULL level-2 map so that a
       region still open at the end of the address space gets flushed
       through the prot1 != prot comparison below.  */
    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}
2124edf8e2afSMika Westerberg 
2125edf8e2afSMika Westerberg static int dump_region(void *priv, unsigned long start,
2126edf8e2afSMika Westerberg     unsigned long end, unsigned long prot)
2127edf8e2afSMika Westerberg {
2128edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2129edf8e2afSMika Westerberg 
2130edf8e2afSMika Westerberg     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2131edf8e2afSMika Westerberg         start, end, end - start,
2132edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2133edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2134edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2135edf8e2afSMika Westerberg 
2136edf8e2afSMika Westerberg     return (0);
2137edf8e2afSMika Westerberg }
2138edf8e2afSMika Westerberg 
2139edf8e2afSMika Westerberg /* dump memory mappings */
2140edf8e2afSMika Westerberg void page_dump(FILE *f)
2141edf8e2afSMika Westerberg {
2142edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2143edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2144edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
21459fa3e853Sbellard }
21469fa3e853Sbellard 
214753a5960aSpbrook int page_get_flags(target_ulong address)
21489fa3e853Sbellard {
21499fa3e853Sbellard     PageDesc *p;
21509fa3e853Sbellard 
21519fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
21529fa3e853Sbellard     if (!p)
21539fa3e853Sbellard         return 0;
21549fa3e853Sbellard     return p->flags;
21559fa3e853Sbellard }
21569fa3e853Sbellard 
21579fa3e853Sbellard /* modify the flags of a page and invalidate the code if
2158ccbb4d44SStuart Brady    necessary. The flag PAGE_WRITE_ORG is positioned automatically
21599fa3e853Sbellard    depending on PAGE_WRITE */
216053a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
21619fa3e853Sbellard {
21629fa3e853Sbellard     PageDesc *p;
216353a5960aSpbrook     target_ulong addr;
21649fa3e853Sbellard 
2165c8a706feSpbrook     /* mmap_lock should already be held.  */
21669fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
21679fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
21689fa3e853Sbellard     if (flags & PAGE_WRITE)
21699fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
21709fa3e853Sbellard     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
21719fa3e853Sbellard         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
217217e2377aSpbrook         /* We may be called for host regions that are outside guest
217317e2377aSpbrook            address space.  */
217417e2377aSpbrook         if (!p)
217517e2377aSpbrook             return;
21769fa3e853Sbellard         /* if the write protection is set, then we invalidate the code
21779fa3e853Sbellard            inside */
21789fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
21799fa3e853Sbellard             (flags & PAGE_WRITE) &&
21809fa3e853Sbellard             p->first_tb) {
2181d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
21829fa3e853Sbellard         }
21839fa3e853Sbellard         p->flags = flags;
21849fa3e853Sbellard     }
21859fa3e853Sbellard }
21869fa3e853Sbellard 
21873d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
21883d97b40bSths {
21893d97b40bSths     PageDesc *p;
21903d97b40bSths     target_ulong end;
21913d97b40bSths     target_ulong addr;
21923d97b40bSths 
219355f280c9Sbalrog     if (start + len < start)
219455f280c9Sbalrog         /* we've wrapped around */
219555f280c9Sbalrog         return -1;
219655f280c9Sbalrog 
21973d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
21983d97b40bSths     start = start & TARGET_PAGE_MASK;
21993d97b40bSths 
22003d97b40bSths     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
22013d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
22023d97b40bSths         if( !p )
22033d97b40bSths             return -1;
22043d97b40bSths         if( !(p->flags & PAGE_VALID) )
22053d97b40bSths             return -1;
22063d97b40bSths 
2207dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
22083d97b40bSths             return -1;
2209dae3270cSbellard         if (flags & PAGE_WRITE) {
2210dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
22113d97b40bSths                 return -1;
2212dae3270cSbellard             /* unprotect the page if it was put read-only because it
2213dae3270cSbellard                contains translated code */
2214dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2215dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2216dae3270cSbellard                     return -1;
2217dae3270cSbellard             }
2218dae3270cSbellard             return 0;
2219dae3270cSbellard         }
22203d97b40bSths     }
22213d97b40bSths     return 0;
22223d97b40bSths }
22233d97b40bSths 
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* A host page may span several target pages; the effective host
       protection is the union of all their flags.  */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    /* Fault not handled: either the page was never writable or it is
       already writable (fault has some other cause).  */
    mmap_unlock();
    return 0;
}
22729fa3e853Sbellard 
/* CONFIG_USER_ONLY: no TLB, so dirty marking is a no-op. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
22779fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
227833417e70Sbellard 
2279e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
22808da3ff18Spbrook 
2281c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2282c227f099SAnthony Liguori                              ram_addr_t memory, ram_addr_t region_offset);
2283c227f099SAnthony Liguori static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2284c227f099SAnthony Liguori                            ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the intra-page bounds [start_addr2, end_addr2] of the part of
   the registered range [start_addr, start_addr + orig_size) that falls
   within the target page at 'addr', and set need_subpage when the range
   does not cover that whole page.  Relies on 'orig_size' and the output
   variables existing at the expansion site. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2304db7b5426Sblueswir1 
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    /* Unassigned pages remember their own address so unassigned-access
       callbacks see the absolute address.  */
    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               container so the old and new regions can coexist.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage container; reuse it.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance phys_offset so consecutive pages
                   map consecutive ram addresses.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Page not mapped yet: allocate its descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
239433417e70Sbellard 
2395ba863458Sbellard /* XXX: temporary until new memory mapping API */
2396c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2397ba863458Sbellard {
2398ba863458Sbellard     PhysPageDesc *p;
2399ba863458Sbellard 
2400ba863458Sbellard     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2401ba863458Sbellard     if (!p)
2402ba863458Sbellard         return IO_MEM_UNASSIGNED;
2403ba863458Sbellard     return p->phys_offset;
2404ba863458Sbellard }
2405ba863458Sbellard 
2406c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407f65ed4c1Saliguori {
2408f65ed4c1Saliguori     if (kvm_enabled())
2409f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2410f65ed4c1Saliguori }
2411f65ed4c1Saliguori 
2412c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2413f65ed4c1Saliguori {
2414f65ed4c1Saliguori     if (kvm_enabled())
2415f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2416f65ed4c1Saliguori }
2417f65ed4c1Saliguori 
/* Allocate 'size' bytes of guest RAM (rounded up to the target page
   size), link it into the global block list and return its ram_addr_t
   offset.  The new pages start out marked fully dirty. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

#if defined(TARGET_S390X) && defined(CONFIG_KVM)
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
    new_block->host = qemu_vmalloc(size);
#endif
    /* NOTE(review): an mmap() failure (MAP_FAILED) is not checked here —
       confirm callers tolerate a bad host pointer or add a check. */
#ifdef MADV_MERGEABLE
    /* Let the kernel (KSM) merge identical pages across guests. */
    madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Grow the dirty bitmap (one byte per target page) and mark the new
       range dirty so it is migrated/flushed on first use. */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2453e9a1ab19Sbellard 
/* Free guest RAM previously allocated with qemu_ram_alloc().
   Currently a stub: blocks are never reclaimed. */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2458e9a1ab19Sbellard 
2459dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
24605579c7f3Spbrook    With the exception of the softmmu code in this file, this should
24615579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
24625579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
24635579c7f3Spbrook 
24645579c7f3Spbrook    It should not be used for general purpose DMA.
24655579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
24665579c7f3Spbrook  */
2467c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
2468dc828ca1Spbrook {
246994a6b54fSpbrook     RAMBlock *prev;
247094a6b54fSpbrook     RAMBlock **prevp;
247194a6b54fSpbrook     RAMBlock *block;
247294a6b54fSpbrook 
247394a6b54fSpbrook     prev = NULL;
247494a6b54fSpbrook     prevp = &ram_blocks;
247594a6b54fSpbrook     block = ram_blocks;
247694a6b54fSpbrook     while (block && (block->offset > addr
247794a6b54fSpbrook                      || block->offset + block->length <= addr)) {
247894a6b54fSpbrook         if (prev)
247994a6b54fSpbrook           prevp = &prev->next;
248094a6b54fSpbrook         prev = block;
248194a6b54fSpbrook         block = block->next;
248294a6b54fSpbrook     }
248394a6b54fSpbrook     if (!block) {
248494a6b54fSpbrook         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
248594a6b54fSpbrook         abort();
248694a6b54fSpbrook     }
248794a6b54fSpbrook     /* Move this entry to to start of the list.  */
248894a6b54fSpbrook     if (prev) {
248994a6b54fSpbrook         prev->next = block->next;
249094a6b54fSpbrook         block->next = *prevp;
249194a6b54fSpbrook         *prevp = block;
249294a6b54fSpbrook     }
249394a6b54fSpbrook     return block->host + (addr - block->offset);
2494dc828ca1Spbrook }
2495dc828ca1Spbrook 
24965579c7f3Spbrook /* Some of the softmmu routines need to translate from a host pointer
24975579c7f3Spbrook    (typically a TLB entry) back to a ram offset.  */
2498c227f099SAnthony Liguori ram_addr_t qemu_ram_addr_from_host(void *ptr)
24995579c7f3Spbrook {
250094a6b54fSpbrook     RAMBlock *prev;
250194a6b54fSpbrook     RAMBlock *block;
250294a6b54fSpbrook     uint8_t *host = ptr;
250394a6b54fSpbrook 
250494a6b54fSpbrook     prev = NULL;
250594a6b54fSpbrook     block = ram_blocks;
250694a6b54fSpbrook     while (block && (block->host > host
250794a6b54fSpbrook                      || block->host + block->length <= host)) {
250894a6b54fSpbrook         prev = block;
250994a6b54fSpbrook         block = block->next;
251094a6b54fSpbrook     }
251194a6b54fSpbrook     if (!block) {
251294a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
251394a6b54fSpbrook         abort();
251494a6b54fSpbrook     }
251594a6b54fSpbrook     return block->offset + (host - block->host);
25165579c7f3Spbrook }
25175579c7f3Spbrook 
/* Byte read from an unassigned (unmapped) physical address: always
   returns 0; some targets additionally report the faulting access. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=0, is_exec=0, is_asi=0, size=1) */
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2528e18231a3Sblueswir1 
/* 16-bit read from an unassigned physical address: always returns 0;
   some targets additionally report the faulting access. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=0, is_exec=0, is_asi=0, size=2) */
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2539e18231a3Sblueswir1 
/* 32-bit read from an unassigned physical address: always returns 0;
   some targets additionally report the faulting access. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=0, is_exec=0, is_asi=0, size=4) */
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
255033417e70Sbellard 
/* Byte write to an unassigned physical address: the value is
   discarded; some targets additionally report the faulting access. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=1, is_exec=0, is_asi=0, size=1) */
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2560e18231a3Sblueswir1 
/* 16-bit write to an unassigned physical address: the value is
   discarded; some targets additionally report the faulting access. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=1, is_exec=0, is_asi=0, size=2) */
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2570e18231a3Sblueswir1 
/* 32-bit write to an unassigned physical address: the value is
   discarded; some targets additionally report the faulting access. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* (addr, is_write=1, is_exec=0, is_asi=0, size=4) */
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
258033417e70Sbellard 
/* Read dispatch table (byte/word/long) for unassigned memory. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
258633417e70Sbellard 
/* Write dispatch table (byte/word/long) for unassigned memory. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
259233417e70Sbellard 
/* Byte write handler for RAM pages routed through IO_MEM_NOTDIRTY
   (pages whose dirty bits are clear, possibly containing translated
   code): performs the store and maintains the dirty bitmap. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Page may hold translated code: invalidate the affected TBs
           before the write lands, then re-read the flags (the
           invalidation can update them). */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark the page dirty for every client except the code-dirty bit. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
26121ccde1cbSbellard 
/* 16-bit variant of notdirty_mem_writeb: store plus dirty-bitmap
   maintenance for IO_MEM_NOTDIRTY pages. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Invalidate overlapping TBs first, then re-read the flags. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark the page dirty for every client except the code-dirty bit. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
26321ccde1cbSbellard 
/* 32-bit variant of notdirty_mem_writeb: store plus dirty-bitmap
   maintenance for IO_MEM_NOTDIRTY pages. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Invalidate overlapping TBs first, then re-read the flags. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark the page dirty for every client except the code-dirty bit. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
26521ccde1cbSbellard 
/* Placeholder read table for io slots that are only ever written
   (registered below for IO_MEM_ROM and IO_MEM_NOTDIRTY). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
26583a7d929eSbellard 
/* Write dispatch table for IO_MEM_NOTDIRTY pages (reads never go
   through this slot; see error_mem_read). */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
26641ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
   'offset' is the in-page offset of the access (combined with
   env->mem_io_vaddr to rebuild the full virtual address), 'len_mask'
   encodes the access size, and 'flags' is BP_MEM_READ/BP_MEM_WRITE.
   On a hit this does not return normally: it longjmps back to the
   CPU loop via cpu_resume_from_signal(). */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit if either range (access vs. watchpoint) contains the
           other's start, and the access direction matches. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Roll the CPU state back to the faulting instruction
                   and drop the TB so it gets retranslated. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Retranslate a single instruction, then re-enter;
                       the re-entry path above raises the debug IRQ. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
27090f459d16Spbrook 
27106658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
27116658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
27126658ffb8Spbrook    phys routines.  */
/* Byte read on a page with watchpoints: run the watchpoint check,
   then fall through to the normal physical read. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
27186658ffb8Spbrook 
/* 16-bit read on a page with watchpoints: check, then pass through. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
27246658ffb8Spbrook 
/* 32-bit read on a page with watchpoints: check, then pass through. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
27306658ffb8Spbrook 
/* Byte write on a page with watchpoints: check, then pass through. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
27376658ffb8Spbrook 
/* 16-bit write on a page with watchpoints: check, then pass through. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
27446658ffb8Spbrook 
/* 32-bit write on a page with watchpoints: check, then pass through. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
27516658ffb8Spbrook 
/* Read dispatch table for pages containing watchpoints. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
27576658ffb8Spbrook 
/* Write dispatch table for pages containing watchpoints. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
27636658ffb8Spbrook 
/* Forward a read (len is the size index: 0=byte, 1=word, 2=long)
   hitting a subpage container to the handler registered for that
   offset within the page. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    /* The [0] index selects the read side of the opaque/region_offset
       tables (writes use [1]; see subpage_register). */
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}
2780db7b5426Sblueswir1 
/* Forward a write (len is the size index: 0=byte, 1=word, 2=long)
   hitting a subpage container to the handler registered for that
   offset within the page. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    /* The [1] index selects the write side of the opaque/region_offset
       tables (reads use [0]; see subpage_register). */
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
2795db7b5426Sblueswir1 
/* Byte read entry point for a subpage container (size index 0). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
2804db7b5426Sblueswir1 
/* Byte write entry point for a subpage container (size index 0). */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
2813db7b5426Sblueswir1 
/* 16-bit read entry point for a subpage container (size index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
2822db7b5426Sblueswir1 
/* 16-bit write entry point for a subpage container (size index 1). */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
2831db7b5426Sblueswir1 
/* 32-bit read entry point for a subpage container (size index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
2840db7b5426Sblueswir1 
/* 32-bit write entry point for a subpage container (size index 2). */
static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
2849db7b5426Sblueswir1 
/* Read dispatch table registered for subpage containers. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
2855db7b5426Sblueswir1 
/* Write dispatch table registered for subpage containers. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2861db7b5426Sblueswir1 
/* Route the byte range [start, end] (page-relative offsets, both
   inclusive and < TARGET_PAGE_SIZE) of subpage container 'mmio' to
   the io handlers of 'memory', shifted by 'region_offset'.
   Returns 0 on success, -1 if the range is out of bounds. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* 'memory' arrives encoded; decode it to a bare io table index. */
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* i iterates the access-size slots of the io tables; only
           slots with a registered handler are copied, so unhandled
           sizes keep whatever the container had before.
           NOTE(review): the loop covers 4 slots while the dispatch
           tables above define 3 widths -- confirm against the io_mem
           array declarations. */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
2894db7b5426Sblueswir1 
/* Allocate and register a subpage container covering the target page
   at 'base'.  *phys receives the new io index tagged with
   IO_MEM_SUBPAGE; the whole page is initially routed to 'orig_memory'
   with offset 'region_offset'.  Returns the container. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Default the full page to the original backing memory. */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}
2915db7b5426Sblueswir1 
291688715657Saliguori static int get_free_io_mem_idx(void)
291788715657Saliguori {
291888715657Saliguori     int i;
291988715657Saliguori 
292088715657Saliguori     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
292188715657Saliguori         if (!io_mem_used[i]) {
292288715657Saliguori             io_mem_used[i] = 1;
292388715657Saliguori             return i;
292488715657Saliguori         }
2925c6703b47SRiku Voipio     fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
292688715657Saliguori     return -1;
292788715657Saliguori }
292888715657Saliguori 
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot from the io_mem table. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* The caller passed an encoded table address; decode it. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* If any access width lacks a handler, tag the returned value
           with IO_MEM_SUBWIDTH so those accesses get special handling. */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
296261382a50Sbellard 
/* Public wrapper: register handlers in a freshly allocated io zone
   (io_index 0 means "allocate" in cpu_register_io_memory_fixed). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
29691eed09cbSAvi Kivity 
/* Release an io zone previously returned by cpu_register_io_memory():
   handlers are reset to the unassigned defaults and the slot is
   marked free.  The argument is the encoded value handed out at
   registration time (index << IO_MEM_SHIFT). */
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
298288715657Saliguori 
/* Install the fixed io_mem entries (ROM, unassigned, notdirty) and
   the watchpoint handler at startup. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the low slots so dynamic allocation never hands them
       out.  NOTE(review): assumes all fixed IO_MEM_* indices decode to
       values below 5 -- confirm against the IO_MEM_* definitions. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
2996e9179ce1SAvi Kivity 
2997e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
2998e2eef170Spbrook 
299913eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
300013eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
/* CONFIG_USER_ONLY variant: "physical" addresses are guest virtual
   addresses, so this copies data page by page through
   lock_user()/unlock_user().  Stops silently (leaving a partial
   transfer) on an invalid or protection-violating page. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Bytes remaining in the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
30408df1cd07Sbellard 
304113eb76e0Sbellard #else
3042c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
304313eb76e0Sbellard                             int len, int is_write)
304413eb76e0Sbellard {
304513eb76e0Sbellard     int l, io_index;
304613eb76e0Sbellard     uint8_t *ptr;
304713eb76e0Sbellard     uint32_t val;
3048c227f099SAnthony Liguori     target_phys_addr_t page;
30492e12669aSbellard     unsigned long pd;
305092e873b9Sbellard     PhysPageDesc *p;
305113eb76e0Sbellard 
305213eb76e0Sbellard     while (len > 0) {
305313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
305413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
305513eb76e0Sbellard         if (l > len)
305613eb76e0Sbellard             l = len;
305792e873b9Sbellard         p = phys_page_find(page >> TARGET_PAGE_BITS);
305813eb76e0Sbellard         if (!p) {
305913eb76e0Sbellard             pd = IO_MEM_UNASSIGNED;
306013eb76e0Sbellard         } else {
306113eb76e0Sbellard             pd = p->phys_offset;
306213eb76e0Sbellard         }
306313eb76e0Sbellard 
306413eb76e0Sbellard         if (is_write) {
30653a7d929eSbellard             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3066c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
306713eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
30688da3ff18Spbrook                 if (p)
30696c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
30706a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
30716a00d601Sbellard                    potential bugs */
30726c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
30731c213d19Sbellard                     /* 32 bit write access */
3074c27004ecSbellard                     val = ldl_p(buf);
30756c2934dbSaurel32                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
307613eb76e0Sbellard                     l = 4;
30776c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
30781c213d19Sbellard                     /* 16 bit write access */
3079c27004ecSbellard                     val = lduw_p(buf);
30806c2934dbSaurel32                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
308113eb76e0Sbellard                     l = 2;
308213eb76e0Sbellard                 } else {
30831c213d19Sbellard                     /* 8 bit write access */
3084c27004ecSbellard                     val = ldub_p(buf);
30856c2934dbSaurel32                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
308613eb76e0Sbellard                     l = 1;
308713eb76e0Sbellard                 }
308813eb76e0Sbellard             } else {
3089b448f2f3Sbellard                 unsigned long addr1;
3090b448f2f3Sbellard                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
309113eb76e0Sbellard                 /* RAM case */
30925579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
309313eb76e0Sbellard                 memcpy(ptr, buf, l);
30943a7d929eSbellard                 if (!cpu_physical_memory_is_dirty(addr1)) {
3095b448f2f3Sbellard                     /* invalidate code */
3096b448f2f3Sbellard                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3097b448f2f3Sbellard                     /* set dirty bit */
3098f23db169Sbellard                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3099f23db169Sbellard                         (0xff & ~CODE_DIRTY_FLAG);
310013eb76e0Sbellard                 }
31013a7d929eSbellard             }
310213eb76e0Sbellard         } else {
31032a4188a3Sbellard             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
31042a4188a3Sbellard                 !(pd & IO_MEM_ROMD)) {
3105c227f099SAnthony Liguori                 target_phys_addr_t addr1 = addr;
310613eb76e0Sbellard                 /* I/O case */
310713eb76e0Sbellard                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
31088da3ff18Spbrook                 if (p)
31096c2934dbSaurel32                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
31106c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
311113eb76e0Sbellard                     /* 32 bit read access */
31126c2934dbSaurel32                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3113c27004ecSbellard                     stl_p(buf, val);
311413eb76e0Sbellard                     l = 4;
31156c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
311613eb76e0Sbellard                     /* 16 bit read access */
31176c2934dbSaurel32                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3118c27004ecSbellard                     stw_p(buf, val);
311913eb76e0Sbellard                     l = 2;
312013eb76e0Sbellard                 } else {
31211c213d19Sbellard                     /* 8 bit read access */
31226c2934dbSaurel32                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3123c27004ecSbellard                     stb_p(buf, val);
312413eb76e0Sbellard                     l = 1;
312513eb76e0Sbellard                 }
312613eb76e0Sbellard             } else {
312713eb76e0Sbellard                 /* RAM case */
31285579c7f3Spbrook                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
312913eb76e0Sbellard                     (addr & ~TARGET_PAGE_MASK);
313013eb76e0Sbellard                 memcpy(buf, ptr, l);
313113eb76e0Sbellard             }
313213eb76e0Sbellard         }
313313eb76e0Sbellard         len -= l;
313413eb76e0Sbellard         buf += l;
313513eb76e0Sbellard         addr += l;
313613eb76e0Sbellard     }
313713eb76e0Sbellard }
31388df1cd07Sbellard 
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk so it never crosses a target page boundary. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* Only RAM, ROM and ROMD-backed pages are writable here; plain
           MMIO and unassigned pages are silently skipped. */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case: write straight into the host backing store,
               bypassing the read-only handling applied to guest stores. */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3177d0ecd2aaSbellard 
/* Temporary host buffer used by cpu_physical_memory_map() when the
   requested region is not directly-addressable RAM (e.g. MMIO). */
typedef struct {
    void *buffer;            /* host allocation; NULL when not in use */
    target_phys_addr_t addr; /* guest physical address being shadowed */
    target_phys_addr_t len;  /* length of the shadowed region */
} BounceBuffer;

/* Single global bounce buffer: only one such mapping may be outstanding
   at a time (buffer != NULL marks it busy). */
static BounceBuffer bounce;
31856d16c2f8Saliguori 
/* A registered callback to be invoked when mapping resources (the bounce
   buffer) are released, so a failed cpu_physical_memory_map() can retry. */
typedef struct MapClient {
    void *opaque;                   /* caller context passed to callback */
    void (*callback)(void *opaque); /* run by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* List of clients waiting to be told the bounce buffer is free again. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3194ba223c29Saliguori 
/* Register a callback to be run when map resources become available again.
   Returns an opaque handle suitable for cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
3204ba223c29Saliguori 
/* Remove and free a client previously added by cpu_register_map_client().
   _client must be the handle returned by the registration call. */
void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}
3212ba223c29Saliguori 
3213ba223c29Saliguori static void cpu_notify_map_clients(void)
3214ba223c29Saliguori {
3215ba223c29Saliguori     MapClient *client;
3216ba223c29Saliguori 
321772cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
321872cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3219ba223c29Saliguori         client->callback(client->opaque);
322034d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3221ba223c29Saliguori     }
3222ba223c29Saliguori }
3223ba223c29Saliguori 
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;  /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;          /* host address of the first chunk */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        /* Handle at most one target page per iteration. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: fall back to the single global bounce buffer.
               It can only serve the first chunk of a mapping and only when
               it is not already in use by another caller. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for a read mapping. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* RAM: translate to the host pointer backing this page. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous: return the shorter,
               contiguous mapping accumulated so far. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
32856d16c2f8Saliguori 
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages
           need their dirty state updated and any translated code there
           invalidated. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer case: flush writes back to guest memory, release the
       buffer, and wake any clients waiting to retry a mapping. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
3321d0ecd2aaSbellard 
/* Load a 32-bit value from guest physical memory.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: dispatch to the region's 32-bit read handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case: read directly through the host mapping. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
33538df1cd07Sbellard 
/* Load a 64-bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: there is no 64-bit handler slot, so the access is
           split into two 32-bit reads, ordered by target endianness. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case: read directly through the host mapping. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
339184b7b8e7Sbellard 
3392aab33094Sbellard /* XXX: optimize */
3393c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3394aab33094Sbellard {
3395aab33094Sbellard     uint8_t val;
3396aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3397aab33094Sbellard     return val;
3398aab33094Sbellard }
3399aab33094Sbellard 
3400aab33094Sbellard /* XXX: optimize */
3401c227f099SAnthony Liguori uint32_t lduw_phys(target_phys_addr_t addr)
3402aab33094Sbellard {
3403aab33094Sbellard     uint16_t val;
3404aab33094Sbellard     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3405aab33094Sbellard     return tswap16(val);
3406aab33094Sbellard }
3407aab33094Sbellard 
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch to the region's 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty bitmap must still be maintained so
           the page gets re-sent, even though this is the "notdirty" path. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
34468df1cd07Sbellard 
/* Store a 64-bit value without updating the dirty bitmap or invalidating
   translated code; addr must be aligned (see stl_phys_notdirty).
   NOTE(review): unlike stl_phys_notdirty, this path has no in_migration
   handling — confirm whether that is intentional for 64-bit stores. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: no 64-bit handler slot exists, so the store is split
           into two 32-bit writes, ordered by target endianness. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: write straight through the host mapping. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3478bc98a7efSj_mayer 
/* Store a 32-bit value to guest physical memory.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch to the region's 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
35148df1cd07Sbellard 
3515aab33094Sbellard /* XXX: optimize */
3516c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3517aab33094Sbellard {
3518aab33094Sbellard     uint8_t v = val;
3519aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3520aab33094Sbellard }
3521aab33094Sbellard 
3522aab33094Sbellard /* XXX: optimize */
3523c227f099SAnthony Liguori void stw_phys(target_phys_addr_t addr, uint32_t val)
3524aab33094Sbellard {
3525aab33094Sbellard     uint16_t v = tswap16(val);
3526aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3527aab33094Sbellard }
3528aab33094Sbellard 
3529aab33094Sbellard /* XXX: optimize */
3530c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
3531aab33094Sbellard {
3532aab33094Sbellard     val = tswap64(val);
3533aab33094Sbellard     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3534aab33094Sbellard }
3535aab33094Sbellard 
353613eb76e0Sbellard #endif
353713eb76e0Sbellard 
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        /* Translate one guest-virtual page at a time. */
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        /* Debugger writes use the ROM path so breakpoints can be planted
           even in read-only memory. */
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
356813eb76e0Sbellard 
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Find the TB that contains the host return address of the I/O call. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore the guest CPU state to the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate: n instructions, with the last one flagged as I/O. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
36272e70f6efSpbrook 
/* Print translation-buffer and TB statistics to f using the supplied
   fprintf-like callback (used by the "info jit" monitor command). */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Accumulate statistics over all currently-translated blocks. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A valid second page address means the TB spans a page boundary. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* tb_next_offset != 0xffff marks a patched direct-jump exit. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Append the TCG backend's own statistics. */
    tcg_dump_info(f, cpu_fprintf);
}
3680e3db7226Sbellard 
368161382a50Sbellard #if !defined(CONFIG_USER_ONLY)
368261382a50Sbellard 
368361382a50Sbellard #define MMUSUFFIX _cmmu
368461382a50Sbellard #define GETPC() NULL
368561382a50Sbellard #define env cpu_single_env
3686b769d8feSbellard #define SOFTMMU_CODE_ACCESS
368761382a50Sbellard 
368861382a50Sbellard #define SHIFT 0
368961382a50Sbellard #include "softmmu_template.h"
369061382a50Sbellard 
369161382a50Sbellard #define SHIFT 1
369261382a50Sbellard #include "softmmu_template.h"
369361382a50Sbellard 
369461382a50Sbellard #define SHIFT 2
369561382a50Sbellard #include "softmmu_template.h"
369661382a50Sbellard 
369761382a50Sbellard #define SHIFT 3
369861382a50Sbellard #include "softmmu_template.h"
369961382a50Sbellard 
370061382a50Sbellard #undef env
370161382a50Sbellard 
370261382a50Sbellard #endif
3703