/*
 * Helpers for HPPA instructions.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "qemu/timer.h"
#include "trace.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit_restore(cs, ra);
}

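/*
 * The two helpers below implement byte-masked stores that must appear
 * atomic to other CPUs.  Rather than taking a lock, they run a
 * compare-and-swap loop on the naturally aligned word containing the
 * affected bytes: read the old word, splice the new bytes in under the
 * big-endian mask, and retry if another CPU changed the word in the
 * meantime.  A sketch of the pattern, with the QEMU plumbing omitted
 * (illustrative pseudocode only; the real loops must also convert
 * between host and big-endian guest byte order):
 *
 *     old = *haddr;
 *     do {
 *         new = (old & ~mask) | (val & mask);
 *     } while (!compare_and_swap(haddr, &old, new));
 */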
static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
                                uint32_t val, uint32_t mask, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint32_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint32_t *)((uintptr_t)vaddr & -4);

    old = *haddr;
    while (1) {
        new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
}

static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
                                uint64_t val, uint64_t mask,
                                int size, uintptr_t ra)
{
#ifdef CONFIG_ATOMIC64
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint64_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint64_t *)((uintptr_t)vaddr & -8);

    old = *haddr;
    while (1) {
        new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
#else
    cpu_loop_exit_atomic(env_cpu(env), ra);
#endif
}

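/*
 * STBY,B ("store bytes, begin case") stores from the given address up
 * to the next word boundary, and is used for the leading partial word
 * of an unaligned block move.  For example, with addr % 4 == 1 the
 * three low-order bytes of val land at addr, addr + 1 and addr + 2,
 * and the architecture requires that this 3-byte store appear atomic.
 */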
static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 1:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    default:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    }
}

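/*
 * STDBY,B is the doubleword analogue: it stores the low-order
 * 8 - (addr % 8) bytes of val starting at addr.  The 3-byte case can
 * reuse the 32-bit masked store, since the affected bytes fall within
 * a single aligned word; the 5-, 6- and 7-byte cases need the 64-bit
 * variant.
 */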
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}

void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}

void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}

void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}

void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}

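/*
 * STBY,E ("store bytes, ending case") handles the trailing partial
 * word of an unaligned block move: it stores the high-order addr % 4
 * bytes of val just below the given address.  When addr is word
 * aligned nothing is stored at all, but the access is still probed so
 * that protection is checked and the cacheline dirtied.
 */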
static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* The 7 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* The 6 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* The 5 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* The 3 byte store must appear atomic. */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 48, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 40, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 48, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 56, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty. */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}

void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}

void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}

void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}

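/*
 * The architecture requires the operand of LDCW to be 16-byte
 * aligned; a load-and-clear from an unaligned address is undefined.
 * Rather than trap, log the guest error and carry on.
 */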
void HELPER(ldc_check)(target_ulong addr)
{
    if (unlikely(addr & 0xf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Undefined ldc to unaligned address mod 16: "
                      TARGET_FMT_lx "\n", addr);
    }
}

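/*
 * Implement the PROBE instruction: test whether the given privilege
 * level could access the address with the rights in "want", without
 * performing the access.  In user-only mode this reduces to checking
 * the host page protections; in system mode it consults the TLB the
 * same way a real access would.
 */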
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is higher than current. */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
    if (excp >= 0) {
        cpu_restore_state(env_cpu(env), GETPC());
        hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        helper_excp(env, excp);
    }
    return (want & prot) != 0;
#endif
}

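/*
 * CR16 is the interval timer.  QEMU_CLOCK_VIRTUAL advances in
 * nanoseconds, so shifting the count right by 2 divides by 4 and
 * yields one tick every 4ns, i.e. the fixed 250MHz rate mentioned
 * below:  10^9 ticks/sec / 4 = 2.5 * 10^8 ticks/sec.
 */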
target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks. */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz. */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}

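/*
 * The helpers below implement the PA-RISC multimedia (MAX) halfword
 * instructions.  Each operates on four independent 16-bit lanes of a
 * 64-bit register.  For the saturating forms, a lane result that
 * overflows is clamped to the representable range rather than being
 * allowed to wrap, e.g. for the signed add:
 *
 *     0x7000 + 0x2000 -> 0x7fff  (not the wrapped value 0x9000)
 */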
uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

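/*
 * HAVG averages each pair of lanes.  OR-ing the shifted-out bit back
 * into the low bit of the result implements round-to-odd rather than
 * truncation:
 *
 *     (6 + 1) / 2 -> 3 | 1 = 3
 *     (6 + 2) / 2 -> 4 | 0 = 4
 *     (6 + 3) / 2 -> 4 | 1 = 5
 */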
uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = extract64(r2, i, 16);
        int fr = f1 + f2;

        ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
    }
    return ret;
}

uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

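/*
 * HSHLADD and HSHRADD shift each lane of r1 left or right by sh bits
 * before adding the corresponding lane of r2, again with signed
 * saturation.  The shift amount comes from the instruction and is
 * assumed here to be at most 3, so the intermediate f1 << sh cannot
 * overflow the int arithmetic.
 */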
uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 << sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 >> sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}
