1 /*
2 * QEMU generic PowerPC hardware System Emulator
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "hw/irq.h"
27 #include "hw/ppc/ppc.h"
28 #include "hw/ppc/ppc_e500.h"
29 #include "qemu/timer.h"
30 #include "exec/cpu-interrupt.h"
31 #include "system/cpus.h"
32 #include "qemu/log.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/error-report.h"
35 #include "system/kvm.h"
36 #include "system/replay.h"
37 #include "system/runstate.h"
38 #include "kvm_ppc.h"
39 #include "migration/vmstate.h"
40 #include "trace.h"
41
42 static void cpu_ppc_tb_stop (CPUPPCState *env);
43 static void cpu_ppc_tb_start (CPUPPCState *env);
44
/*
 * Raise or lower one internal interrupt source of @cpu.
 *
 * @irq is a PPC_INTERRUPT_* mask bit, @level non-zero to assert.
 * Updates env->pending_interrupts and, only when the pending mask
 * actually changed, pokes the interrupt delivery machinery (and KVM
 * when it is in use).
 */
void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
{
    CPUPPCState *env = &cpu->env;
    unsigned int prev;

    /* We may already have the BQL if coming from the reset path */
    BQL_LOCK_GUARD();

    prev = env->pending_interrupts;
    if (level) {
        env->pending_interrupts |= irq;
    } else {
        env->pending_interrupts &= ~irq;
    }

    if (env->pending_interrupts != prev) {
        ppc_maybe_interrupt(env);
        if (kvm_enabled()) {
            kvmppc_set_interrupt(cpu, irq, level);
        }
    }

    trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
                           CPU(cpu)->interrupt_request);
}
71
/* PowerPC 6xx / 7xx internal IRQ controller */

/*
 * qdev GPIO input handler for the 6xx/7xx CPU interrupt pins.
 * @pin is a PPC6xx_INPUT_* index, @level the new signal level.
 * Only real level transitions are acted on; the current state of
 * each pin is latched in env->irq_input_state.
 */
static void ppc6xx_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC6xx_INPUT_TBEN:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("time base", level);
            if (level) {
                cpu_ppc_tb_start(env);
            } else {
                cpu_ppc_tb_stop(env);
            }
            break;
        case PPC6xx_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC6xx_INPUT_SMI:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("SMI IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
            break;
        case PPC6xx_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depends on HID0 status
             * 603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                trace_ppc_irq_set_state("machine check", 1);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC6xx_INPUT_CKSTP_IN:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            /* XXX: Note that the only way to restart the CPU is to reset it */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            }
            break;
        case PPC6xx_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                trace_ppc_irq_reset("CPU");
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC6xx_INPUT_SRESET:
            trace_ppc_irq_set_state("RESET IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        default:
            g_assert_not_reached();
        }
        /* Latch the new level of this pin */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}
145
/* Register the 6xx/7xx interrupt pins as qdev GPIO inputs of the CPU. */
void ppc6xx_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
}
150
151 #if defined(TARGET_PPC64)
/* PowerPC 970 internal IRQ controller */

/*
 * qdev GPIO input handler for the 970 CPU interrupt pins.
 * @pin is a PPC970_INPUT_* index, @level the new signal level.
 * Only real level transitions are acted on; the current state of
 * each pin is latched in env->irq_input_state.
 */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("SMI IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depends on HID0 status
             * 603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                trace_ppc_irq_set_state("machine check", 1);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            } else {
                /* Unlike the 6xx, deasserting CKSTP restarts the CPU */
                trace_ppc_irq_cpu("restart");
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            trace_ppc_irq_set_state("RESET IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            trace_ppc_irq_set_state("TBEN IRQ", level);
            /* XXX: TODO */
            break;
        default:
            g_assert_not_reached();
        }
        /* Latch the new level of this pin */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}
222
/* Register the 970 interrupt pins as qdev GPIO inputs of the CPU. */
void ppc970_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
}
227
228 /* POWER7 internal IRQ controller */
power7_set_irq(void * opaque,int pin,int level)229 static void power7_set_irq(void *opaque, int pin, int level)
230 {
231 PowerPCCPU *cpu = opaque;
232
233 trace_ppc_irq_set(&cpu->env, pin, level);
234
235 switch (pin) {
236 case POWER7_INPUT_INT:
237 /* Level sensitive - active high */
238 trace_ppc_irq_set_state("external IRQ", level);
239 ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
240 break;
241 default:
242 g_assert_not_reached();
243 }
244 }
245
/* Register the POWER7 interrupt pins as qdev GPIO inputs of the CPU. */
void ppcPOWER7_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
}
250
251 /* POWER9 internal IRQ controller */
power9_set_irq(void * opaque,int pin,int level)252 static void power9_set_irq(void *opaque, int pin, int level)
253 {
254 PowerPCCPU *cpu = opaque;
255
256 trace_ppc_irq_set(&cpu->env, pin, level);
257
258 switch (pin) {
259 case POWER9_INPUT_INT:
260 /* Level sensitive - active high */
261 trace_ppc_irq_set_state("external IRQ", level);
262 ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
263 break;
264 case POWER9_INPUT_HINT:
265 /* Level sensitive - active high */
266 trace_ppc_irq_set_state("HV external IRQ", level);
267 ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
268 break;
269 default:
270 g_assert_not_reached();
271 }
272 }
273
/* Register the POWER9 interrupt pins as qdev GPIO inputs of the CPU. */
void ppcPOWER9_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
}
278 #endif /* defined(TARGET_PPC64) */
279
/*
 * 40x core reset: raise the reset interrupt and record the reset kind
 * in DBSR[MRR] (0b01 = core reset).
 */
void ppc40x_core_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    env->spr[SPR_40x_DBSR] = (env->spr[SPR_40x_DBSR] & ~0x00000300) |
                             0x00000100;
}
292
/*
 * 40x chip reset: raise the reset interrupt and record the reset kind
 * in DBSR[MRR] (0b10 = chip reset).
 */
void ppc40x_chip_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    /* XXX: TODO reset all internal peripherals */
    env->spr[SPR_40x_DBSR] = (env->spr[SPR_40x_DBSR] & ~0x00000300) |
                             0x00000200;
}
306
/* 40x system reset: request a full guest-initiated machine reset. */
void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}
312
/*
 * Write handler for the 40x DBCR0 register: decode the 2-bit reset
 * request field (bits 29:28 of @val) and trigger the corresponding
 * core/chip/system reset. Takes the BQL around the reset action.
 */
void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    unsigned rst = (val >> 28) & 0x3;

    bql_lock();

    switch (rst) {
    case 0x1:
        /* Core reset */
        ppc40x_core_reset(cpu);
        break;
    case 0x2:
        /* Chip reset */
        ppc40x_chip_reset(cpu);
        break;
    case 0x3:
        /* System reset */
        ppc40x_system_reset(cpu);
        break;
    default:
        /* 0x0: no action */
        break;
    }

    bql_unlock();
}
339
/* PowerPC 40x internal IRQ controller */

/*
 * qdev GPIO input handler for the 40x CPU interrupt pins.
 * @pin is a PPC40x_INPUT_* index, @level the new signal level.
 * Only real level transitions are acted on; the current state of
 * each pin is latched in env->irq_input_state.
 */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                trace_ppc_irq_reset("system");
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                trace_ppc_irq_reset("chip");
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                trace_ppc_irq_reset("core");
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("critical IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            } else {
                trace_ppc_irq_cpu("restart");
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("debug pin", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            g_assert_not_reached();
        }
        /* Latch the new level of this pin */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}
409
/* Register the 40x interrupt pins as qdev GPIO inputs of the CPU. */
void ppc40x_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
}
414
/* PowerPC E500 internal IRQ controller */

/*
 * qdev GPIO input handler for the e500 CPU interrupt pins.
 * @pin is a PPCE500_INPUT_* index, @level the new signal level.
 * Only real level transitions are acted on; the current state of
 * each pin is latched in env->irq_input_state.
 */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            /* Machine check pin triggers a full system reset here */
            if (level) {
                trace_ppc_irq_reset("system");
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            if (level) {
                trace_ppc_irq_reset("core");
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("critical IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("core IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("debug pin", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            g_assert_not_reached();
        }
        /* Latch the new level of this pin */
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}
464
/* Register the e500 interrupt pins as qdev GPIO inputs of the CPU. */
void ppce500_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
}
469
470 /* Enable or Disable the E500 EPR capability */
ppce500_set_mpic_proxy(bool enabled)471 void ppce500_set_mpic_proxy(bool enabled)
472 {
473 CPUState *cs;
474
475 CPU_FOREACH(cs) {
476 PowerPCCPU *cpu = POWERPC_CPU(cs);
477
478 cpu->env.mpic_proxy = enabled;
479 if (kvm_enabled()) {
480 kvmppc_set_mpic_proxy(cpu, enabled);
481 }
482 }
483 }
484
485 /*****************************************************************************/
486 /* PowerPC time base and decrementer emulation */
487
488 /*
489 * Conversion between QEMU_CLOCK_VIRTUAL ns and timebase (TB) ticks:
490 * TB ticks are arrived at by multiplying tb_freq then dividing by
491 * ns per second, and rounding down. TB ticks drive all clocks and
492 * timers in the target machine.
493 *
494 * Converting TB intervals to ns for the purpose of setting a
495 * QEMU_CLOCK_VIRTUAL timer should go the other way, but rounding
496 * up. Rounding down could cause the timer to fire before the TB
497 * value has been reached.
498 */
/* Convert a QEMU_CLOCK_VIRTUAL time in ns into TB ticks, rounding down. */
static uint64_t ns_to_tb(uint32_t freq, int64_t clock)
{
    return muldiv64(clock, freq, NANOSECONDS_PER_SECOND);
}
503
/* Convert TB ticks (virtual clock, not adjusted by TB offset) back to ns */
static int64_t tb_to_ns_round_up(uint32_t freq, uint64_t tb)
{
    /* Round up so a timer set from this never fires before the TB value */
    return muldiv64_round_up(tb, NANOSECONDS_PER_SECOND, freq);
}
509
/* Return the TB-style register value at virtual time @vmclk, biased by
 * @tb_offset (callers pass tb_offset, atb_offset, vtb_offset, ...). */
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return ns_to_tb(tb_env->tb_freq, vmclk) + tb_offset;
}
515
cpu_ppc_load_tbl(CPUPPCState * env)516 uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
517 {
518 ppc_tb_t *tb_env = env->tb_env;
519 uint64_t tb;
520
521 if (kvm_enabled()) {
522 return env->spr[SPR_TBL];
523 }
524
525 tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
526 tb_env->tb_offset);
527 trace_ppc_tb_load(tb);
528
529 return tb;
530 }
531
_cpu_ppc_load_tbu(CPUPPCState * env)532 static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
533 {
534 ppc_tb_t *tb_env = env->tb_env;
535 uint64_t tb;
536
537 tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
538 tb_env->tb_offset);
539 trace_ppc_tb_load(tb);
540
541 return tb >> 32;
542 }
543
cpu_ppc_load_tbu(CPUPPCState * env)544 uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
545 {
546 if (kvm_enabled()) {
547 return env->spr[SPR_TBU];
548 }
549
550 return _cpu_ppc_load_tbu(env);
551 }
552
/*
 * Make a TB-style register read @value at virtual time @vmclk by
 * recomputing its offset: afterwards ns_to_tb(freq, vmclk) + *tb_offsetp
 * equals @value.
 */
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value - ns_to_tb(tb_env->tb_freq, vmclk);

    trace_ppc_tb_store(value, *tb_offsetp);
}
560
/* Write the lower 32 bits of the time base, preserving the upper half. */
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t hi;

    hi = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset) &
         0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, now, &tb_env->tb_offset, hi | (uint64_t)value);
}
571
/* TCG path: write the upper 32 bits of the time base, preserving the
 * lower half. */
static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t lo;

    lo = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset) &
         0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, now, &tb_env->tb_offset,
                     ((uint64_t)value << 32) | lo);
}
583
/* Write TBU (no KVM special case here, unlike the load side). */
void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}
588
cpu_ppc_load_atbl(CPUPPCState * env)589 uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
590 {
591 ppc_tb_t *tb_env = env->tb_env;
592 uint64_t tb;
593
594 tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
595 tb_env->atb_offset);
596 trace_ppc_tb_load(tb);
597
598 return tb;
599 }
600
cpu_ppc_load_atbu(CPUPPCState * env)601 uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
602 {
603 ppc_tb_t *tb_env = env->tb_env;
604 uint64_t tb;
605
606 tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
607 tb_env->atb_offset);
608 trace_ppc_tb_load(tb);
609
610 return tb >> 32;
611 }
612
/* Write the lower 32 bits of the alternate time base, preserving the
 * upper half. */
void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t hi;

    hi = cpu_ppc_get_tb(tb_env, now, tb_env->atb_offset) &
         0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, now, &tb_env->atb_offset, hi | (uint64_t)value);
}
623
/* Write the upper 32 bits of the alternate time base, preserving the
 * lower half. */
void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t lo;

    lo = cpu_ppc_get_tb(tb_env, now, tb_env->atb_offset) &
         0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, now, &tb_env->atb_offset,
                     ((uint64_t)value << 32) | lo);
}
635
/* Advance the time base by @offset TB ticks. */
void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset)
{
    env->tb_env->tb_offset += offset;
}
640
/* Rewind the time base by @offset TB ticks. */
void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset)
{
    env->tb_env->tb_offset -= offset;
}
645
/* Read the Virtual Time Base, which runs at tb_freq from its own offset. */
uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->vtb_offset);
}
653
/* Write the Virtual Time Base by recomputing vtb_offset. */
void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->vtb_offset, value);
}
661
/*
 * TBU40 store: replace the top 40 bits of the time base with the top 40
 * bits of @value, leaving the low 24 bits of the TB running undisturbed.
 */
void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset);

    tb = (tb & 0xFFFFFFUL) | (value & ~0xFFFFFFUL);
    cpu_ppc_store_tb(tb_env, now, &tb_env->tb_offset, tb);
}
673
/*
 * Freeze the time base and alternate time base: latch their current
 * values into the offsets, then zero tb_freq so reads stop advancing.
 * tb_freq == 0 is the "frozen" marker tested by cpu_ppc_tb_start().
 */
static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}
695
/*
 * Unfreeze the time base and alternate time base: while frozen
 * (tb_freq == 0), tb_offset/atb_offset hold the absolute frozen TB
 * values; restore the frequency and recompute proper offsets from them.
 */
static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}
716
ppc_decr_clear_on_delivery(CPUPPCState * env)717 bool ppc_decr_clear_on_delivery(CPUPPCState *env)
718 {
719 ppc_tb_t *tb_env = env->tb_env;
720 int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
721 return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
722 }
723
/*
 * Compute the current (H)DECR value from its underflow deadline.
 * @now is the QEMU_CLOCK_VIRTUAL time in ns; @next the deadline in
 * timebase units (decr_next or hdecr_next).
 */
static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
                                          uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t n;
    int64_t decr;

    n = ns_to_tb(tb_env->decr_freq, now);

    /* BookE timers stop when reaching 0. */
    if (next < n && tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        /* Elsewhere the decrementer wraps and goes negative */
        decr = next - n;
    }

    trace_ppc_decr_load(decr);

    return decr;
}
744
/* Read DECR at virtual time @now, honoring the large-decrementer mode. */
static target_ulong _cpu_ppc_load_decr(CPUPPCState *env, int64_t now)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t decr;

    decr = __cpu_ppc_load_decr(env, now, tb_env->decr_next);

    /*
     * If large decrementer is enabled then the decrementer is signed extended
     * to 64 bits, otherwise it is a 32 bit value.
     */
    if (env->spr[SPR_LPCR] & LPCR_LD) {
        PowerPCCPU *cpu = env_archcpu(env);
        PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
        return sextract64(decr, 0, pcc->lrg_decr_bits);
    }
    return (uint32_t) decr;
}
763
cpu_ppc_load_decr(CPUPPCState * env)764 target_ulong cpu_ppc_load_decr(CPUPPCState *env)
765 {
766 if (kvm_enabled()) {
767 return env->spr[SPR_DECR];
768 } else {
769 return _cpu_ppc_load_decr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
770 }
771 }
772
/* Read HDECR at virtual time @now, honoring the large-decrementer width. */
static target_ulong _cpu_ppc_load_hdecr(CPUPPCState *env, int64_t now)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t hdecr;

    hdecr = __cpu_ppc_load_decr(env, now, tb_env->hdecr_next);

    /*
     * If we have a large decrementer (POWER9 or later) then hdecr is sign
     * extended to 64 bits, otherwise it is 32 bits.
     */
    if (pcc->lrg_decr_bits > 32) {
        return sextract64(hdecr, 0, pcc->lrg_decr_bits);
    }
    return (uint32_t) hdecr;
}
791
/* Read the hypervisor decrementer at the current virtual time. */
target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
{
    return _cpu_ppc_load_hdecr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
796
/* Read the PURR, which runs at tb_freq from its own purr_offset. */
uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->purr_offset);
}
804
/* When decrementer expires,
 * all we need to do is generate or queue a CPU exception
 */

/* Assert the decrementer interrupt line. */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    trace_ppc_decr_excp("raise");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}
814
/* Deassert the decrementer interrupt line (level-based DEC only). */
static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}
819
/* Assert the hypervisor decrementer interrupt line, except in PM state. */
static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    trace_ppc_decr_excp("raise HV");

    /* The architecture specifies that we don't deliver HDEC
     * interrupts in a PM state. Not only they don't cause a
     * wakeup but they also get effectively discarded.
     */
    if (!env->resume_as_sreset) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}
835
/* Deassert the hypervisor decrementer interrupt line. */
static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}
840
/*
 * Common (H)DECR store path.
 *
 * Computes the new underflow deadline (*nextp, in timebase units),
 * raises or lowers the interrupt according to the MSB semantics
 * selected by @flags, and re-arms @timer for the next underflow.
 * @decr is the value being replaced, @value the new one; both are
 * truncated to @nr_bits.
 */
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 uint32_t flags, target_ulong decr,
                                 target_ulong value, int nr_bits)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t next;
    int64_t signed_value;
    int64_t signed_decr;

    /* Truncate value to decr_width and sign extend for simplicity */
    value = extract64(value, 0, nr_bits);
    decr = extract64(decr, 0, nr_bits);
    signed_value = sextract64(value, 0, nr_bits);
    signed_decr = sextract64(decr, 0, nr_bits);

    trace_ppc_decr_store(nr_bits, decr, value);

    /*
     * Calculate the next decrementer event and set a timer.
     * decr_next is in timebase units to keep rounding simple. Note it is
     * not adjusted by tb_offset because if TB changes via tb_offset changing,
     * decrementer does not change, so not directly comparable with TB.
     */
    next = ns_to_tb(tb_env->decr_freq, now) + value;
    *nextp = next; /* nextp is in timebase units */

    /*
     * Going from 1 -> 0 or 0 -> -1 is the event to generate a DEC interrupt.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if (((flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
        ((flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
          && signed_decr >= 0)) {
        /* Interrupt is (or becomes) pending: no timer needed */
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (signed_value >= 0 && (flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Adjust timer */
    timer_mod(timer, tb_to_ns_round_up(tb_env->decr_freq, next));
}
895
/* Store DECR: program the common path with the per-CPU DEC timer and
 * the underflow semantics recorded in tb_env->flags. */
static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, int64_t now,
                                       target_ulong decr, target_ulong value,
                                       int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, now, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower,
                         tb_env->flags, decr, value, nr_bits);
}
906
/*
 * Guest store to the DECR SPR. Under KVM the kernel emulates the
 * decrementer, so this is a no-op; under TCG, reprogram the DEC timer,
 * using the large-decrementer width when LPCR[LD] is set.
 */
void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    int64_t now;
    int nr_bits;

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    nr_bits = (env->spr[SPR_LPCR] & LPCR_LD) ? pcc->lrg_decr_bits : 32;

    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    _cpu_ppc_store_decr(cpu, now, _cpu_ppc_load_decr(env, now), value,
                        nr_bits);
}
928
/* QEMUTimer callback: fires when the DECR underflow deadline passes. */
static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}
935
/* Store HDECR, if this CPU has one (hdecr_timer is only allocated then). */
static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, int64_t now,
                                        target_ulong hdecr, target_ulong value,
                                        int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    if (tb_env->hdecr_timer != NULL) {
        /* HDECR (Book3S 64bit) is edge-based, not level like DECR */
        __cpu_ppc_store_decr(cpu, now, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
                             PPC_DECR_UNDERFLOW_TRIGGERED,
                             hdecr, value, nr_bits);
    }
}
950
/* Guest store to the HDECR SPR: reprogram the HDEC timer at the class's
 * large-decrementer width. */
void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    _cpu_ppc_store_hdecr(cpu, now, _cpu_ppc_load_hdecr(env, now), value,
                         pcc->lrg_decr_bits);
}
962
/* QEMUTimer callback: fires when the HDECR underflow deadline passes. */
static void cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}
969
/* Set the PURR to @value at virtual time @now by recomputing purr_offset. */
static void _cpu_ppc_store_purr(CPUPPCState *env, int64_t now, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, now, &tb_env->purr_offset, value);
}
976
/* Guest store to the PURR SPR at the current virtual time. */
void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
{
    _cpu_ppc_store_purr(env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), value);
}
981
/*
 * Capture the guest timebase relative to host ticks, so it can be
 * re-derived after a pause or on the migration destination.
 */
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    if (replay_mode == REPLAY_MODE_NONE) {
        /* not used anymore, we keep it for compatibility */
        tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    } else {
        /* simpler for record-replay to avoid this event, compat not needed */
        tb->time_of_the_day_ns = 0;
    }

    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;

    /* Remember whether the save happened for a pause/savevm rather than
     * for a live migration; pre_save uses this to avoid a double save. */
    tb->runstate_paused =
        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
}
1009
/*
 * Re-derive tb_offset from the saved guest_timebase and the current
 * host tick counter, and apply the new offset to every CPU (and KVM).
 */
static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* NOTE(review): freq is only used in the trace division below; a
     * frozen TB (tb_freq == 0) would divide by zero here — confirm this
     * path cannot run with the time base stopped. */
    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
    }
}
1037
/* VM state-change hook: keep the guest timebase consistent across stops */
void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
                                   RunState state)
{
    PPCTimebase *tb = opaque;

    /* Save the timebase when stopping, restore it when resuming */
    if (!running) {
        timebase_save(tb);
    } else {
        timebase_load(tb);
    }
}
1049
1050 /*
1051 * When migrating a running guest, read the clock just
1052 * before migration, so that the guest clock counts
1053 * during the events between:
1054 *
 * * vm_stop()
 * * pre_save()
1058 *
1059 * This reduces clock difference on migration from 5s
1060 * to 0.1s (when max_downtime == 5s), because sending the
1061 * final pages of memory (which happens between vm_stop()
1062 * and pre_save()) takes max_downtime.
1063 */
timebase_pre_save(void * opaque)1064 static int timebase_pre_save(void *opaque)
1065 {
1066 PPCTimebase *tb = opaque;
1067
1068 /* guest_timebase won't be overridden in case of paused guest or savevm */
1069 if (!tb->runstate_paused) {
1070 timebase_save(tb);
1071 }
1072
1073 return 0;
1074 }
1075
/* Migration stream layout for the guest timebase (see timebase_save/load) */
const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = timebase_pre_save,
    .fields = (const VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        /* kept only for stream compatibility; see timebase_save() */
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};
1087
/* Set up (once) timebase frequency (in Hz) */
void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_new0(ppc_tb_t, 1);
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (is_book3s_arch2x(env)) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                      &cpu_ppc_decr_cb, cpu);
    /* HDEC timer only exists with HV mode and no virtual hypervisor */
    if (env->has_hv_mode && !cpu->vhyp) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           &cpu_ppc_hdecr_cb, cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }

    /* TB and DECR tick at the same rate here */
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
}
1114
/* Reset decrementer/HDEC/PURR state on machine reset */
void cpu_ppc_tb_reset(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    /* Cancel any pending DECR event and lower its interrupt line */
    timer_del(tb_env->decr_timer);
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
    tb_env->decr_next = 0;
    /* hdecr_timer is NULL when there is no HV mode (see cpu_ppc_tb_init) */
    if (tb_env->hdecr_timer != NULL) {
        timer_del(tb_env->hdecr_timer);
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
        tb_env->hdecr_next = 0;
        _cpu_ppc_store_hdecr(cpu, 0, 0, 0, 64);
    }

    /*
     * There is a bug in Linux 2.4 kernels:
     * if a decrementer exception is pending when it enables msr_ee at startup,
     * it's not ready to handle it...
     *
     * On machine reset, this is called before icount is reset, so for
     * icount-mode, setting TB registers using now == qemu_clock_get_ns()
     * results in them being garbage after icount is reset. Use an
     * explicit now == 0 to get a consistent reset state.
     */
    _cpu_ppc_store_decr(cpu, 0, 0, -1, 64);
    _cpu_ppc_store_purr(env, 0, 0);
}
1143
/* Free the timers and the tb_env allocated by cpu_ppc_tb_init() */
void cpu_ppc_tb_free(CPUPPCState *env)
{
    timer_free(env->tb_env->decr_timer);
    /*
     * hdecr_timer may be NULL when there is no HV mode (see
     * cpu_ppc_tb_init); timer_free() is expected to accept NULL.
     */
    timer_free(env->tb_env->hdecr_timer);
    g_free(env->tb_env);
}
1150
1151 /* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
cpu_ppc_hdecr_init(CPUPPCState * env)1152 void cpu_ppc_hdecr_init(CPUPPCState *env)
1153 {
1154 PowerPCCPU *cpu = env_archcpu(env);
1155
1156 assert(env->tb_env->hdecr_timer == NULL);
1157
1158 env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1159 &cpu_ppc_hdecr_cb, cpu);
1160 }
1161
/*
 * Tear down the HDEC timer and deassert the HDEC interrupt.
 * Counterpart of cpu_ppc_hdecr_init().
 */
void cpu_ppc_hdecr_exit(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /* Free first so the callback cannot re-raise HDEC after we lower it */
    timer_free(env->tb_env->hdecr_timer);
    env->tb_env->hdecr_timer = NULL;

    cpu_ppc_hdecr_lower(cpu);
}
1171
1172 /*****************************************************************************/
1173 /* PowerPC 40x timers */
1174
/* PIT, FIT & WDT */
/* Per-CPU state for the embedded 40x/440 timer facilities */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload; /* PIT auto-reload value */
    uint64_t fit_next;   /* Tick for next FIT interrupt */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;   /* Tick for next WDT interrupt */
    QEMUTimer *wdt_timer;

    /* 405 have the PIT, 440 have a DECR. */
    unsigned int decr_excp;
};
1187
1188 /* Fixed interval timer */
cpu_4xx_fit_cb(void * opaque)1189 static void cpu_4xx_fit_cb (void *opaque)
1190 {
1191 PowerPCCPU *cpu = opaque;
1192 CPUPPCState *env = &cpu->env;
1193 ppc_tb_t *tb_env;
1194 ppc40x_timer_t *ppc40x_timer;
1195 uint64_t now, next;
1196
1197 tb_env = env->tb_env;
1198 ppc40x_timer = tb_env->opaque;
1199 now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1200 switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1201 case 0:
1202 next = 1 << 9;
1203 break;
1204 case 1:
1205 next = 1 << 13;
1206 break;
1207 case 2:
1208 next = 1 << 17;
1209 break;
1210 case 3:
1211 next = 1 << 21;
1212 break;
1213 default:
1214 /* Cannot occur, but makes gcc happy */
1215 return;
1216 }
1217 next = now + tb_to_ns_round_up(tb_env->tb_freq, next);
1218 timer_mod(ppc40x_timer->fit_timer, next);
1219 env->spr[SPR_40x_TSR] |= 1 << 26;
1220 if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1221 ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1222 }
1223 trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1224 env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1225 }
1226
/* Programmable interval timer */
/*
 * (Re)arm or stop the PIT. The PIT only runs when the reload value is
 * meaningful (> 1), TCR bit 26 is set, and — when re-arming from the
 * expiry path (@is_excp) — TCR bit 22 (presumably auto-reload enable;
 * confirm against the 40x TCR spec) is also set.
 */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        trace_ppc4xx_pit_stop();
        timer_del(tb_env->decr_timer);
    } else {
        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

        if (is_excp) {
            /* Auto-reload: advance from the previous deadline (no drift) */
            tb_env->decr_next += ppc40x_timer->pit_reload;
        } else {
            /* Fresh start: deadline relative to the current time */
            tb_env->decr_next = ns_to_tb(tb_env->decr_freq, now)
                                + ppc40x_timer->pit_reload;
        }
        next = tb_to_ns_round_up(tb_env->decr_freq, tb_env->decr_next);
        timer_mod(tb_env->decr_timer, next);
    }
}
1254
cpu_4xx_pit_cb(void * opaque)1255 static void cpu_4xx_pit_cb (void *opaque)
1256 {
1257 PowerPCCPU *cpu = opaque;
1258 CPUPPCState *env = &cpu->env;
1259 ppc_tb_t *tb_env;
1260 ppc40x_timer_t *ppc40x_timer;
1261
1262 tb_env = env->tb_env;
1263 ppc40x_timer = tb_env->opaque;
1264 env->spr[SPR_40x_TSR] |= 1 << 27;
1265 if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1266 ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1267 }
1268 start_stop_pit(env, tb_env, 1);
1269 trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1270 (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1271 env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1272 ppc40x_timer->pit_reload);
1273 }
1274
/* Watchdog timer */
/*
 * WDT expiry. The action depends on TSR bits 31:30 (the watchdog state):
 * first expiry only sets TSR bit 31; the next raises the WDT interrupt
 * (if enabled in TCR); if still unserviced, the reset action programmed
 * in TCR bits 29:28 is applied.
 */
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* TCR bits 31:30 select the watchdog period in TB ticks */
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + tb_to_ns_round_up(tb_env->decr_freq, next);
    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        /* First expiry: set TSR bit 31 and re-arm, no interrupt yet */
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1U << 31;
        break;
    case 0x2:
        /* Second expiry: set TSR bit 30 and raise WDT if TCR enables it */
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1 << 30;
        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
        }
        break;
    case 0x3:
        /* Final expiry: record the reset cause and apply the reset action */
        env->spr[SPR_40x_TSR] &= ~0x30000000;
        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
        case 0x0:
            /* No reset */
            break;
        case 0x1: /* Core reset */
            ppc40x_core_reset(cpu);
            break;
        case 0x2: /* Chip reset */
            ppc40x_chip_reset(cpu);
            break;
        case 0x3: /* System reset */
            ppc40x_system_reset(cpu);
            break;
        }
    }
}
1340
/* Guest write to the 40x PIT reload register */
void store_40x_pit (CPUPPCState *env, target_ulong val)
{
    ppc_tb_t *tb_env = env->tb_env;
    ppc40x_timer_t *ppc40x_timer = tb_env->opaque;

    trace_ppc40x_store_pit(val);
    /* The new reload value takes effect on the next (re)start decision */
    ppc40x_timer->pit_reload = val;
    start_stop_pit(env, tb_env, 0);
}
1352
load_40x_pit(CPUPPCState * env)1353 target_ulong load_40x_pit (CPUPPCState *env)
1354 {
1355 return cpu_ppc_load_decr(env);
1356 }
1357
/*
 * Guest write to the 40x TSR: writing 1 to a status bit in the top
 * byte clears it (write-one-to-clear).
 */
void store_40x_tsr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /* Fixed: this used to emit trace_ppc40x_store_tcr() (swapped events) */
    trace_ppc40x_store_tsr(val);

    env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
    /* Clearing TSR bit 31 also deasserts a pending PIT interrupt */
    if (val & 0x80000000) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
    }
}
1369
/*
 * Guest write to the 40x TCR. Only the timer-control bits (top 10) are
 * kept; the PIT and WDT are then re-evaluated against the new control
 * settings.
 */
void store_40x_tcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    /* Fixed: this used to emit trace_ppc40x_store_tsr() (swapped events) */
    trace_ppc40x_store_tcr(val);

    env->spr[SPR_40x_TCR] = val & 0xFFC00000;
    start_stop_pit(env, tb_env, 1);
    cpu_4xx_wdt_cb(cpu);
}
1382
/* Clock callback: update the 40x timebase/decrementer frequency */
static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;

    trace_ppc40x_set_tb_clk(freq);
    /* TB and DECR tick at the same rate on 40x */
    env->tb_env->tb_freq = freq;
    env->tb_env->decr_freq = freq;
    /* XXX: we should also update all timers */
}
1393
ppc_40x_timers_init(CPUPPCState * env,uint32_t freq,unsigned int decr_excp)1394 clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1395 unsigned int decr_excp)
1396 {
1397 ppc_tb_t *tb_env;
1398 ppc40x_timer_t *ppc40x_timer;
1399 PowerPCCPU *cpu = env_archcpu(env);
1400
1401 trace_ppc40x_timers_init(freq);
1402
1403 tb_env = g_new0(ppc_tb_t, 1);
1404 ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1405
1406 env->tb_env = tb_env;
1407 tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1408 tb_env->tb_freq = freq;
1409 tb_env->decr_freq = freq;
1410 tb_env->opaque = ppc40x_timer;
1411
1412 /* We use decr timer for PIT */
1413 tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1414 ppc40x_timer->fit_timer =
1415 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1416 ppc40x_timer->wdt_timer =
1417 timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1418 ppc40x_timer->decr_excp = decr_excp;
1419
1420 return &ppc_40x_set_tb_clk;
1421 }
1422
1423 /*****************************************************************************/
1424 /* Embedded PowerPC Device Control Registers */
/* One DCR slot: access callbacks plus the opaque handed back to them */
typedef struct ppc_dcrn_t ppc_dcrn_t;
struct ppc_dcrn_t {
    dcr_read_cb dcr_read;
    dcr_write_cb dcr_write;
    void *opaque;
};

/* XXX: on 460, DCR addresses are 32 bits wide,
 * using DCRIPR to get the 22 upper bits of the DCR address
 */
#define DCRN_NB 1024
struct ppc_dcr_t {
    ppc_dcrn_t dcrn[DCRN_NB];    /* direct-mapped table of DCR slots */
    int (*read_error)(int dcrn);  /* hook for invalid/unmapped reads */
    int (*write_error)(int dcrn); /* hook for invalid/unmapped writes */
};
1441
/*
 * Read DCR @dcrn into @valp. Returns 0 on success; on an invalid or
 * unregistered DCR, defers to the board's read_error hook (or -1).
 */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    if (dcrn >= 0 && dcrn < DCRN_NB) {
        ppc_dcrn_t *dcr = &dcr_env->dcrn[dcrn];

        if (dcr->dcr_read != NULL) {
            *valp = dcr->dcr_read(dcr->opaque, dcrn);
            trace_ppc_dcr_read(dcrn, *valp);
            return 0;
        }
    }

    if (dcr_env->read_error != NULL) {
        return dcr_env->read_error(dcrn);
    }
    return -1;
}
1462
/*
 * Write @val to DCR @dcrn. Returns 0 on success; on an invalid or
 * unregistered DCR, defers to the board's write_error hook (or -1).
 */
int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    if (dcrn >= 0 && dcrn < DCRN_NB) {
        ppc_dcrn_t *dcr = &dcr_env->dcrn[dcrn];

        if (dcr->dcr_write != NULL) {
            trace_ppc_dcr_write(dcrn, val);
            dcr->dcr_write(dcr->opaque, dcrn, val);
            return 0;
        }
    }

    if (dcr_env->write_error != NULL) {
        return dcr_env->write_error(dcrn);
    }
    return -1;
}
1483
/*
 * Register read/write callbacks for DCR @dcrn.
 * Returns 0 on success, -1 if the DCR number is invalid or the slot
 * is already taken.
 */
int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
{
    ppc_dcr_t *dcr_env = env->dcr_env;
    ppc_dcrn_t *dcr;

    if (dcr_env == NULL || dcrn < 0 || dcrn >= DCRN_NB) {
        return -1;
    }
    dcr = &dcr_env->dcrn[dcrn];
    /* Refuse to overwrite an already-registered DCR */
    if (dcr->opaque != NULL || dcr->dcr_read != NULL ||
        dcr->dcr_write != NULL) {
        return -1;
    }
    dcr->opaque = opaque;
    dcr->dcr_read = dcr_read;
    dcr->dcr_write = dcr_write;
    return 0;
}
1506
/*
 * Allocate the per-CPU DCR table. The error hooks (may be NULL) are
 * invoked on accesses to invalid or unregistered DCRs. Returns 0.
 */
int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
                  int (*write_error)(int dcrn))
{
    ppc_dcr_t *dcr_env = g_new0(ppc_dcr_t, 1);

    /* g_new0 leaves every slot unregistered (all-NULL callbacks) */
    dcr_env->read_error = read_error;
    dcr_env->write_error = write_error;
    env->dcr_env = dcr_env;
    return 0;
}
1519
1520 /*****************************************************************************/
1521
/* Processor ID of @cpu as exposed through SPR PIR */
int ppc_cpu_pir(PowerPCCPU *cpu)
{
    return cpu->env.spr_cb[SPR_PIR].default_value;
}
1527
/* Thread ID of @cpu as exposed through SPR TIR */
int ppc_cpu_tir(PowerPCCPU *cpu)
{
    return cpu->env.spr_cb[SPR_TIR].default_value;
}
1533
ppc_get_vcpu_by_pir(int pir)1534 PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1535 {
1536 CPUState *cs;
1537
1538 CPU_FOREACH(cs) {
1539 PowerPCCPU *cpu = POWERPC_CPU(cs);
1540
1541 if (ppc_cpu_pir(cpu) == pir) {
1542 return cpu;
1543 }
1544 }
1545
1546 return NULL;
1547 }
1548
/* Reset the external IRQ input state of @cpu */
void ppc_irq_reset(PowerPCCPU *cpu)
{
    cpu->env.irq_input_state = 0;
    /* Under KVM the external-interrupt line must be cleared in-kernel too */
    if (kvm_enabled()) {
        kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
    }
}
1558