/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Access guest memory in blocks. */

#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "access.h"


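/*
 * Probe the guest virtual range [vaddr, vaddr + size) for @type accesses
 * and record the resulting host address(es) in @ret.  A range that
 * crosses a page boundary is probed as two fragments; if the two host
 * mappings turn out to be contiguous, they are merged back into one.
 */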
void access_prepare_mmu(X86Access *ret, CPUX86State *env,
                        vaddr vaddr, unsigned size,
                        MMUAccessType type, int mmu_idx, uintptr_t ra)
{
    int size1, size2;
    void *haddr1, *haddr2;

    assert(size > 0 && size <= TARGET_PAGE_SIZE);

    size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK));
    size2 = size - size1;

    memset(ret, 0, sizeof(*ret));
    ret->vaddr = vaddr;
    ret->size = size;
    ret->size1 = size1;
    ret->mmu_idx = mmu_idx;
    ret->env = env;
    ret->ra = ra;

    haddr1 = probe_access(env, vaddr, size1, type, mmu_idx, ra);
    ret->haddr1 = haddr1;

    if (unlikely(size2)) {
        haddr2 = probe_access(env, vaddr + size1, size2, type, mmu_idx, ra);
        if (haddr2 == haddr1 + size1) {
            ret->size1 = size;
        } else {
#ifdef CONFIG_USER_ONLY
            g_assert_not_reached();
#else
            ret->haddr2 = haddr2;
#endif
        }
    }
}

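/*
 * As access_prepare_mmu(), but using the current CPU's data MMU index.
 * A purely illustrative caller (hypothetical, not copied from an actual
 * user of this API) might look like:
 *
 *     X86Access ac;
 *
 *     access_prepare(&ac, env, ptr, 16, MMU_DATA_STORE, GETPC());
 *     access_stq(&ac, ptr, lo);
 *     access_stq(&ac, ptr + 8, hi);
 */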
void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
                    unsigned size, MMUAccessType type, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    access_prepare_mmu(ret, env, vaddr, size, type, mmu_idx, ra);
}

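/*
 * Return a host pointer through which @len bytes at @addr can be accessed
 * directly, or NULL if the access must take the slow path (no host
 * mapping, or the bytes straddle the two probed fragments).
 */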
static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
{
    vaddr offset = addr - ac->vaddr;

    assert(addr >= ac->vaddr);

    /* No haddr means probe_access wants to force slow path */
    if (!ac->haddr1) {
        return NULL;
    }

#ifdef CONFIG_USER_ONLY
    assert(offset <= ac->size1 - len);
    return ac->haddr1 + offset;
#else
    if (likely(offset <= ac->size1 - len)) {
        return ac->haddr1 + offset;
    }
    assert(offset <= ac->size - len);
    /*
     * If the address is not naturally aligned, it might span both pages.
     * Only return ac->haddr2 if the area is entirely within the second page,
     * otherwise fall back to slow accesses.
     */
    if (likely(offset >= ac->size1)) {
        return ac->haddr2 + (offset - ac->size1);
    }
    return NULL;
#endif
}

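/*
 * Load helpers: use the host pointer when access_ptr() provides one,
 * otherwise fall back to the cpu_ldXX_mmuidx_ra() slow path with the
 * mmu_idx and return address captured at prepare time.  Multi-byte
 * values are read little-endian, matching the x86 guest.
 */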
uint8_t access_ldb(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (likely(p)) {
        return ldub_p(p);
    }
    return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint16_t access_ldw(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (likely(p)) {
        return lduw_le_p(p);
    }
    return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint32_t access_ldl(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (likely(p)) {
        return ldl_le_p(p);
    }
    return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

uint64_t access_ldq(X86Access *ac, vaddr addr)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (likely(p)) {
        return ldq_le_p(p);
    }
    return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
}

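/*
 * Store helpers: same fast path/slow path split as the loads above;
 * multi-byte values are stored little-endian.
 */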
void access_stb(X86Access *ac, vaddr addr, uint8_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint8_t));

    if (likely(p)) {
        stb_p(p, val);
    } else {
        cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stw(X86Access *ac, vaddr addr, uint16_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint16_t));

    if (likely(p)) {
        stw_le_p(p, val);
    } else {
        cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stl(X86Access *ac, vaddr addr, uint32_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint32_t));

    if (likely(p)) {
        stl_le_p(p, val);
    } else {
        cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}

void access_stq(X86Access *ac, vaddr addr, uint64_t val)
{
    void *p = access_ptr(ac, addr, sizeof(uint64_t));

    if (likely(p)) {
        stq_le_p(p, val);
    } else {
        cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
    }
}