/*
 * PowerPC emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/ppc/ppc.h"
#include "exec/helper-proto.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"

/*****************************************************************************/
/* SPR accesses */

target_ulong helper_load_tbl(CPUPPCState *env)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl(CPUPPCState *env)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_vtb(CPUPPCState *env)
{
    return cpu_ppc_load_vtb(env);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
target_ulong helper_load_purr(CPUPPCState *env)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}

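/*
 * Note on the store pattern below (also used for the TB, VTB, TBU40 and
 * HDECR stores further down): these registers are treated as shared rather
 * than per-thread, so unless ppc_cpu_lpar_single_threaded() reports a single
 * thread, a store is propagated to every sibling thread to keep their
 * copies consistent.
 */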
void helper_store_purr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_purr(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_purr(cenv, val);
    }
}
#endif

#if !defined(CONFIG_USER_ONLY)
void helper_store_tbl(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_tbl(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_tbl(cenv, val);
    }
}

void helper_store_tbu(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_tbu(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_tbu(cenv, val);
    }
}

void helper_store_atbl(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

target_ulong helper_load_decr(CPUPPCState *env)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr(CPUPPCState *env, target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

target_ulong helper_load_hdecr(CPUPPCState *env)
{
    return cpu_ppc_load_hdecr(env);
}

void helper_store_hdecr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_hdecr(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_hdecr(cenv, val);
    }
}

void helper_store_vtb(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_vtb(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_vtb(cenv, val);
    }
}

void helper_store_tbu40(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);
    CPUState *ccs;

    if (ppc_cpu_lpar_single_threaded(cs)) {
        cpu_ppc_store_tbu40(env, val);
        return;
    }

    THREAD_SIBLING_FOREACH(cs, ccs) {
        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
        cpu_ppc_store_tbu40(cenv, val);
    }
}

target_ulong helper_load_40x_pit(CPUPPCState *env)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_tcr(CPUPPCState *env, target_ulong val)
{
    store_40x_tcr(env, val);
}

void helper_store_40x_tsr(CPUPPCState *env, target_ulong val)
{
    store_40x_tsr(env, val);
}

void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
{
    store_booke_tsr(env, val);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * qemu-user breaks with pnv headers, so they go under ifdefs for now.
 * A cleanup may be to move the powernv-specific registers and helpers into
 * target/ppc/pnv_helper.c
 */
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_chip.h"
/*
 * POWER processor Timebase Facility
 */

/*
 * The TBST is the timebase state machine, which is a per-core machine that
 * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
 * not used in POWER8/9/10.
 *
 * The state machine gets driven by writes to the TFMR SPR from the core, and
 * by signals from the ChipTOD. The state machine table for common
 * transitions is as follows (according to hardware specs, not necessarily
 * this implementation):
 *
 * | Cur            | Event                            | New |
 * +----------------+----------------------------------+-----+
 * | 0 RESET        | TFMR |= LOAD_TOD_MOD             | 1   |
 * | 1 SEND_TOD_MOD | "immediate transition"           | 2   |
 * | 2 NOT_SET      | mttbu/mttbu40/mttbl              | 2   |
 * | 2 NOT_SET      | TFMR |= MOVE_CHIP_TOD_TO_TB      | 6   |
 * | 6 SYNC_WAIT    | "sync pulse from ChipTOD"        | 7   |
 * | 7 GET_TOD      | ChipTOD xscom MOVE_TOD_TO_TB_REG | 8   |
 * | 8 TB_RUNNING   | mttbu/mttbu40                    | 8   |
 * | 8 TB_RUNNING   | TFMR |= LOAD_TOD_MOD             | 1   |
 * | 8 TB_RUNNING   | mttbl                            | 9   |
 * | 9 TB_ERROR     | TFMR |= CLEAR_TB_ERRORS          | 0   |
 *
 * - LOAD_TOD_MOD will also move states 2,6 to state 1; this is omitted from
 *   the table because it is not a typical init flow.
 *
 * - The ERROR state can be entered from most/all other states on invalid
 *   transitions (e.g., when a TFMR control bit is set in a state where the
 *   table does not list it as causing a transition); these are omitted to
 *   avoid clutter.
 *
 * Note: mttbl causes a timebase error because it inevitably causes ticks
 * to be lost and the TB to become unsynchronized, whereas the TB can be
 * adjusted using mttbu* without losing ticks. mttbl behaviour is not
 * modelled.
 *
 * Note: the TB state machine does not actually cause any real TB adjustment!
 * The TB starts out synchronized across all vCPUs (hardware threads) in
 * QEMU, so for now the purpose of the TBST and ChipTOD model is simply
 * to step through firmware initialisation sequences.
 */
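
/*
 * As one concrete illustration of the table above, a sketch of the common
 * init flow as modelled here (not an authoritative or exhaustive firmware
 * sequence):
 *
 *   mtspr TFMR, ECLIPZ | LOAD_TOD_MOD          0 RESET   -> 1 -> 2 NOT_SET
 *   mtspr TFMR, ECLIPZ | MOVE_CHIP_TOD_TO_TB   2 NOT_SET -> 6 SYNC_WAIT
 *   sync pulse from the ChipTOD                6 -> 7 GET_TOD
 *   ChipTOD xscom MOVE_TOD_TO_TB_REG           7 -> 8 TB_RUNNING
 *   mfspr TFMR  (poll until TBST == TB_RUNNING and TB_VALID is observed)
 */
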
static unsigned int tfmr_get_tb_state(uint64_t tfmr)
{
    return (tfmr & TFMR_TBST_ENCODED) >> (63 - 31);
}

static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst)
{
    tfmr &= ~TFMR_TBST_LAST;
    tfmr |= (tfmr & TFMR_TBST_ENCODED) >> 4; /* move state to last state */
    tfmr &= ~TFMR_TBST_ENCODED;
    tfmr |= (uint64_t)tbst << (63 - 31); /* move new state to state */

    if (tbst == TBST_TB_RUNNING) {
        tfmr |= TFMR_TB_VALID;
    } else {
        tfmr &= ~TFMR_TB_VALID;
    }

    return tfmr;
}
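
/*
 * Worked example for the two helpers above (assuming, as the shifts imply,
 * that TFMR_TBST_ENCODED and TFMR_TBST_LAST are the adjacent 4-bit TBST
 * fields at IBM bits 28:31 and 32:35 of TFMR):
 *
 *   tbst = TBST_SYNC_WAIT (6): 6 << (63 - 31) places 0b0110 into bits 28:31,
 *   while the preceding ">> 4" copies whatever state was encoded there down
 *   into the "last state" field, so the previous TBST remains visible.
 */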

static void write_tfmr(CPUPPCState *env, target_ulong val)
{
    CPUState *cs = env_cpu(env);

    if (ppc_cpu_core_single_threaded(cs)) {
        env->spr[SPR_TFMR] = val;
    } else {
        CPUState *ccs;
        THREAD_SIBLING_FOREACH(cs, ccs) {
            CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
            cenv->spr[SPR_TFMR] = val;
        }
    }
}

static PnvCoreTODState *cpu_get_tbst(PowerPCCPU *cpu)
{
    PnvCore *pc = pnv_cpu_state(cpu)->pnv_core;

    if (pc->big_core && pc->tod_state.big_core_quirk) {
        /* Must operate on the even small core */
        int core_id = CPU_CORE(pc)->core_id;
        if (core_id & 1) {
            pc = pc->chip->cores[core_id & ~1];
        }
    }

    return &pc->tod_state;
}

static void tb_state_machine_step(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
    uint64_t tfmr = env->spr[SPR_TFMR];
    unsigned int tbst = tfmr_get_tb_state(tfmr);

    if (!(tfmr & TFMR_TB_ECLIPZ) || tbst == TBST_TB_ERROR) {
        return;
    }

    if (tod_state->tb_sync_pulse_timer) {
        tod_state->tb_sync_pulse_timer--;
    } else {
        tfmr |= TFMR_TB_SYNC_OCCURED;
        write_tfmr(env, tfmr);
    }

    if (tod_state->tb_state_timer) {
        tod_state->tb_state_timer--;
        return;
    }

    if (tfmr & TFMR_LOAD_TOD_MOD) {
        tfmr &= ~TFMR_LOAD_TOD_MOD;
        if (tbst == TBST_GET_TOD) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        } else {
            tfmr = tfmr_new_tb_state(tfmr, TBST_SEND_TOD_MOD);
            /* State seems to transition immediately */
            tfmr = tfmr_new_tb_state(tfmr, TBST_NOT_SET);
        }
    } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
        if (tbst == TBST_SYNC_WAIT) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD);
            tod_state->tb_state_timer = 3;
        } else if (tbst == TBST_GET_TOD) {
            if (tod_state->tod_sent_to_tb) {
                tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING);
                tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
                tod_state->tb_ready_for_tod = 0;
                tod_state->tod_sent_to_tb = 0;
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
                          "state machine in invalid state 0x%x\n", tbst);
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
            tod_state->tb_ready_for_tod = 0;
        }
    }

    write_tfmr(env, tfmr);
}

target_ulong helper_load_tfmr(CPUPPCState *env)
{
    tb_state_machine_step(env);

    /* TB_ECLIPZ always reads back as set in this model */
    return env->spr[SPR_TFMR] | TFMR_TB_ECLIPZ;
}

void helper_store_tfmr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PnvCoreTODState *tod_state = cpu_get_tbst(cpu);
    uint64_t tfmr = env->spr[SPR_TFMR];
    uint64_t clear_on_write;
    unsigned int tbst = tfmr_get_tb_state(tfmr);

    if (!(val & TFMR_TB_ECLIPZ)) {
        qemu_log_mask(LOG_UNIMP, "TFMR non-ECLIPZ mode not implemented\n");
        tfmr &= ~TFMR_TBST_ENCODED;
        tfmr &= ~TFMR_TBST_LAST;
        goto out;
    }

    /* Update control bits */
    tfmr = (tfmr & ~TFMR_CONTROL_MASK) | (val & TFMR_CONTROL_MASK);

    /* Several bits are clear-on-write, only one is implemented so far */
    clear_on_write = val & TFMR_FIRMWARE_CONTROL_ERROR;
    tfmr &= ~clear_on_write;

    /*
     * mtspr always clears this. The sync pulse timer makes it come back
     * after the second mfspr.
     */
    tfmr &= ~TFMR_TB_SYNC_OCCURED;
    tod_state->tb_sync_pulse_timer = 1;

    if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) ==
                        (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) {
        qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: LOAD_TOD_MOD and "
                      "MOVE_CHIP_TOD_TO_TB both set\n");
        tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
        tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        tod_state->tb_ready_for_tod = 0;
        goto out;
    }

    if (tfmr & TFMR_CLEAR_TB_ERRORS) {
        /*
         * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice.
         * This is not simulated/required here.
         */
        tfmr = tfmr_new_tb_state(tfmr, TBST_RESET);
        tfmr &= ~TFMR_CLEAR_TB_ERRORS;
        tfmr &= ~TFMR_LOAD_TOD_MOD;
        tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
        tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */
        tod_state->tb_ready_for_tod = 0;
        tod_state->tod_sent_to_tb = 0;
        goto out;
    }

    if (tbst == TBST_TB_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: mtspr TFMR in TB_ERROR"
                      " state\n");
        tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
        goto out;
    }

    if (tfmr & TFMR_LOAD_TOD_MOD) {
        /* Wait for an arbitrary 3 mfspr until the next state transition. */
        tod_state->tb_state_timer = 3;
    } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
        if (tbst == TBST_NOT_SET) {
            tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT);
            tod_state->tb_ready_for_tod = 1;
            tod_state->tb_state_timer = 3; /* arbitrary */
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
                          "not in TB not set state 0x%x\n",
                          tbst);
            tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
            tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
            tod_state->tb_ready_for_tod = 0;
        }
    }

out:
    write_tfmr(env, tfmr);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
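
/*
 * Rough sketch of how a DCR access reaches the helpers below (describing
 * the existing plumbing, not new behaviour): a guest mfdcr/mtdcr ends up
 * calling helper_load_dcr()/helper_store_dcr(), which dispatch through
 * ppc_dcr_read()/ppc_dcr_write() under the BQL to the per-DCR callbacks
 * that the SoC/board models registered at init time (typically via
 * ppc_dcr_register() in hw/ppc/ppc.c).
 */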

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    } else {
        int ret;

        bql_lock();
        ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
        bql_unlock();
        if (unlikely(ret != 0)) {
            qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
                          (uint32_t)dcrn, (uint32_t)dcrn);
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL, GETPC());
        }
    }
    return val;
}

void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    } else {
        int ret;
        bql_lock();
        ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
        bql_unlock();
        if (unlikely(ret != 0)) {
            qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
                          (uint32_t)dcrn, (uint32_t)dcrn);
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL, GETPC());
        }
    }
}
#endif