1 /*
2 * APIC support
3 *
4 * Copyright (c) 2004-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/thread.h"
21 #include "qemu/error-report.h"
22 #include "hw/i386/apic_internal.h"
23 #include "hw/i386/apic.h"
24 #include "hw/intc/ioapic.h"
25 #include "hw/intc/i8259.h"
26 #include "hw/intc/kvm_irqcount.h"
27 #include "hw/pci/msi.h"
28 #include "qemu/host-utils.h"
29 #include "system/kvm.h"
30 #include "trace.h"
31 #include "hw/i386/apic-msidef.h"
32 #include "qapi/error.h"
33 #include "qom/object.h"
34
/* Direction flags for apic_sync_vapic(): which way state flows between the
 * local APIC and the in-guest VAPIC page. */
#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4

/* Registry of all local APICs, indexed by initial APIC ID; sized by
 * apic_set_max_apic_id(). */
static APICCommonState **local_apics;
static uint32_t max_apics;       /* capacity of local_apics, multiple of 32 */
static uint32_t max_apic_words;  /* max_apics / 32: words per delivery bitmask */

#define TYPE_APIC "apic"
/*This is reusing the APICCommonState typedef from APIC_COMMON */
DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
                         TYPE_APIC)

/* Forward declarations for helpers used before their definitions. */
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode);
52
/*
 * Size the local APIC registry for IDs up to 'max_apic_id'. The capacity
 * is rounded up to a whole number of 32-bit words so delivery bitmasks
 * can be handled word-wise.
 */
void apic_set_max_apic_id(uint32_t max_apic_id)
{
    const int word_size = 32;

    /* Round up to the next multiple of the bitmask word width. */
    max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1);
    max_apic_words = max_apics >> 5;

    local_apics = g_malloc0(sizeof(*local_apics) * max_apics);
}
63
64
65 /* Find first bit starting from msb */
apic_fls_bit(uint32_t value)66 static int apic_fls_bit(uint32_t value)
67 {
68 return 31 - clz32(value);
69 }
70
71 /* Find first bit starting from lsb */
apic_ffs_bit(uint32_t value)72 static int apic_ffs_bit(uint32_t value)
73 {
74 return ctz32(value);
75 }
76
/* Clear bit 'index' in a 256-bit register image (8 x 32-bit words). */
static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i;
    uint32_t mask;

    i = index >> 5;
    /* Use an unsigned constant: 1 << 31 on a signed int is undefined
     * behavior, which (index & 0x1f) == 31 would trigger. */
    mask = 1U << (index & 0x1f);
    tab[i] &= ~mask;
}
84
85 /* return -1 if no bit is set */
get_highest_priority_int(uint32_t * tab)86 static int get_highest_priority_int(uint32_t *tab)
87 {
88 int i;
89 for (i = 7; i >= 0; i--) {
90 if (tab[i] != 0) {
91 return i * 32 + apic_fls_bit(tab[i]);
92 }
93 }
94 return -1;
95 }
96
/*
 * Synchronize state with the guest-writable VAPIC page at s->vapic_paddr.
 *
 * SYNC_FROM_VAPIC pulls the guest's TPR into s->tpr. SYNC_TO_VAPIC pushes
 * the whole VAPICState (TPR, enabled flag, ISR/IRR summary) and must run
 * on the owning vCPU thread. SYNC_ISR_IRR_TO_VAPIC pushes only the
 * isr..irr summary bytes. No-op when no VAPIC page is registered.
 */
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        /* Default: write back only the isr..irr summary window. */
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            /* Full sync is only valid from the vCPU's own thread. */
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        /* Only the priority class (high nibble) of the highest in-service
         * vector is exposed. */
        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        /* write_rom variant so the update also lands if the VAPIC page is
         * in ROM-backed memory. */
        address_space_write_rom(&address_space_memory,
                                s->vapic_paddr + start,
                                MEMTXATTRS_UNSPECIFIED,
                                ((void *)&vapic_state) + start, length);
    }
}
145
/* Callback invoked when the VAPIC base address changes: push the complete
 * local state out to the (new) VAPIC page. */
static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}
150
/*
 * Deliver a local interrupt source through its LVT entry 'vector'
 * (timer, LINT0/1, ...). Masked entries are dropped.
 */
static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED) {
        return;
    }

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED: {
        /* Only the LINT pins honor the level-trigger bit. */
        bool level = (vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
                     (lvt & APIC_LVT_LEVEL_TRIGGER);

        apic_set_irq(s, lvt & 0xff,
                     level ? APIC_TRIGGER_LEVEL : APIC_TRIGGER_EDGE);
        break;
    }
    }
}
182
/*
 * Forward the 8259 PIC output line, wired to LINT0, into the APIC.
 * level != 0 raises the line through the LVT; level == 0 lowers it and
 * retracts a latched fixed-mode, level-triggered request.
 */
void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            /* Level-triggered: the request is no longer asserted. */
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}
204
/* External NMI pin: traditionally wired to LINT1, so route it through
 * that LVT entry. */
static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}
209
/*
 * Execute 'code' once for every registered local APIC whose bit is set in
 * 'deliver_bitmask', binding 'apic' to each one in turn. Unpopulated
 * slots in local_apics[] are skipped.
 * NOTE(review): __i/__j are formally reserved identifiers (C11 7.1.3);
 * kept unchanged here to avoid churn.
 */
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for (__i = 0; __i < max_apic_words; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for (__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}
227
/*
 * Deliver an interrupt posted on the APIC bus to every local APIC selected
 * in 'deliver_bitmask', honoring 'delivery_mode'. Fixed and ExtINT modes
 * latch 'vector_num' into each target's IRR; SMI/NMI/INIT raise the
 * corresponding CPU interrupt lines directly.
 */
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            /* No real arbitration: pick the first selected APIC. */
            for (i = 0; i < max_apic_words; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    /* Fixed delivery (and the ExtINT fall-through): set the vector pending
     * in every selected APIC. */
    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}
289
/*
 * Resolve (dest, dest_mode) to a set of local APICs and post the
 * interrupt on the bus.
 */
static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    g_autofree uint32_t *targets = g_new(uint32_t, max_apic_words);

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(targets, dest, dest_mode);
    apic_bus_deliver(targets, delivery_mode, vector_num, trigger_mode);
}
302
is_x2apic_mode(DeviceState * dev)303 bool is_x2apic_mode(DeviceState *dev)
304 {
305 APICCommonState *s = APIC(dev);
306
307 return s->apicbase & MSR_IA32_APICBASE_EXTD;
308 }
309
/*
 * Validate a prospective MSR_IA32_APICBASE value against the current
 * state. Returns 0 if the transition is allowed, -1 if it must fault.
 */
static int apic_set_base_check(APICCommonState *s, uint64_t val)
{
    bool old_enable = s->apicbase & MSR_IA32_APICBASE_ENABLE;
    bool old_extd = s->apicbase & MSR_IA32_APICBASE_EXTD;
    bool new_enable = val & MSR_IA32_APICBASE_ENABLE;
    bool new_extd = val & MSR_IA32_APICBASE_EXTD;

    /* Enable x2apic when x2apic is not supported by CPU */
    if (new_extd && !cpu_has_x2apic_feature(&s->cpu->env)) {
        return -1;
    }

    /* Invalid state: EXTD set while ENABLE is clear. */
    if (!new_enable && new_extd) {
        return -1;
    }

    /* Invalid transition from disabled mode straight to x2APIC. */
    if (!old_enable && !old_extd && new_enable && new_extd) {
        return -1;
    }

    /* Invalid transition from x2APIC back to xAPIC. */
    if (old_enable && old_extd && new_enable && !new_extd) {
        return -1;
    }

    return 0;
}
346
/*
 * Handle a write to MSR_IA32_APICBASE. Returns -1 when the requested mode
 * transition is invalid (caller turns this into a fault), 0 on success.
 * Statement order matters: the transition branches below compare the new
 * 'val' bits against the still-unmodified mode bits in s->apicbase.
 */
static int apic_set_base(APICCommonState *s, uint64_t val)
{
    if (apic_set_base_check(s, val) < 0) {
        return -1;
    }

    /* Take the new base address; preserve BSP and the *old* ENABLE state
     * so the transition checks below still see it. */
    s->apicbase = (val & MSR_IA32_APICBASE_BASE) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        /* Globally disable: drop the CPUID APIC bit and soft-disable. */
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }

    /* Transition from disabled mode to xAPIC */
    if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase |= MSR_IA32_APICBASE_ENABLE;
        cpu_set_apic_feature(&s->cpu->env);
    }

    /* Transition from xAPIC to x2APIC */
    if (cpu_has_x2apic_feature(&s->cpu->env) &&
        !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        s->apicbase |= MSR_IA32_APICBASE_EXTD;

        /* Initial x2APIC logical ID: cluster in the high half, one CPU
         * bit (id & 0xf) in the low half. */
        s->log_dest = ((s->initial_apic_id & 0xffff0) << 16) |
                      (1 << (s->initial_apic_id & 0xf));
    }

    return 0;
}
380
/* CR8 write hook: set the task priority (CR8 holds the high nibble). */
static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (s->vapic_paddr) {
        return;
    }
    s->tpr = val << 4;
    apic_update_irq(s);
}
389
/* Return the highest pending (IRR) vector, or -1 when there is no APIC
 * or nothing is pending. */
int apic_get_highest_priority_irr(DeviceState *dev)
{
    if (!dev) {
        /* no interrupts */
        return -1;
    }

    return get_highest_priority_int(APIC_COMMON(dev)->irr);
}
401
apic_get_tpr(APICCommonState * s)402 static uint8_t apic_get_tpr(APICCommonState *s)
403 {
404 apic_sync_vapic(s, SYNC_FROM_VAPIC);
405 return s->tpr >> 4;
406 }
407
/* Processor priority: the larger of the task priority (TPR) and the
 * priority class of the highest in-service vector. */
int apic_get_ppr(APICCommonState *s)
{
    int task_prio = s->tpr >> 4;
    int isr_vec = get_highest_priority_int(s->isr);
    int isr_prio;

    isr_prio = (isr_vec < 0 ? 0 : isr_vec) >> 4;

    return task_prio >= isr_prio ? s->tpr : isr_prio << 4;
}
423
/* Arbitration priority register: not modeled, always reads as 0. */
static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}
429
430
431 /*
432 * <0 - low prio interrupt,
433 * 0 - no interrupt,
434 * >0 - interrupt number
435 */
apic_irq_pending(APICCommonState * s)436 static int apic_irq_pending(APICCommonState *s)
437 {
438 int irrv, ppr;
439
440 if (!(s->spurious_vec & APIC_SV_ENABLE)) {
441 return 0;
442 }
443
444 irrv = get_highest_priority_int(s->irr);
445 if (irrv < 0) {
446 return 0;
447 }
448 ppr = apic_get_ppr(s);
449 if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
450 return -1;
451 }
452
453 return irrv;
454 }
455
/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;
    DeviceState *dev = (DeviceState *)s;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        /* Called from another thread: kick the vCPU so it re-evaluates
         * its APIC state itself. */
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        /* A deliverable vector is pending: assert INTR. */
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        /* Nothing from us and nothing (acceptable) from the PIC either:
         * deassert INTR. */
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}
471
/* CPU_INTERRUPT_POLL handler: refresh TPR from the VAPIC page and then
 * re-evaluate whether INTR should be asserted. */
void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *apic = APIC(dev);

    apic_sync_vapic(apic, SYNC_FROM_VAPIC);
    apic_update_irq(apic);
}
479
/*
 * Latch 'vector_num' into the IRR (and into the TMR for level-triggered
 * requests) and notify the CPU.
 */
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    /* Report as coalesced when the vector was already pending. */
    kvm_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}
501
/*
 * End-of-interrupt: retire the highest in-service vector, broadcast the
 * EOI to the I/O APIC for level-triggered interrupts (unless directed
 * EOI is enabled), and re-evaluate pending interrupts.
 */
static void apic_eoi(APICCommonState *s)
{
    int isrv = get_highest_priority_int(s->isr);

    if (isrv < 0) {
        return;
    }
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) &&
        apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}
515
apic_match_dest(APICCommonState * apic,uint32_t dest)516 static bool apic_match_dest(APICCommonState *apic, uint32_t dest)
517 {
518 if (is_x2apic_mode(&apic->parent_obj)) {
519 return apic->initial_apic_id == dest;
520 } else {
521 return apic->id == (uint8_t)dest;
522 }
523 }
524
/* Set the bitmask bit of every registered APIC whose physical
 * destination matches 'dest'. */
static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest)
{
    int idx;

    for (idx = 0; idx < max_apics; idx++) {
        APICCommonState *candidate = local_apics[idx];

        if (candidate && apic_match_dest(candidate, dest)) {
            apic_set_bit(deliver_bitmask, idx);
        }
    }
}
537
538 /*
539 * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast.
540 * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC
541 * broadcast.
542 */
apic_get_broadcast_bitmask(uint32_t * deliver_bitmask,bool is_x2apic_broadcast)543 static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask,
544 bool is_x2apic_broadcast)
545 {
546 int i;
547 APICCommonState *apic_iter;
548
549 for (i = 0; i < max_apics; i++) {
550 apic_iter = local_apics[i];
551 if (apic_iter) {
552 bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj);
553
554 if (is_x2apic_broadcast && apic_in_x2apic) {
555 apic_set_bit(deliver_bitmask, i);
556 } else if (!is_x2apic_broadcast && !apic_in_x2apic) {
557 apic_set_bit(deliver_bitmask, i);
558 }
559 }
560 }
561 }
562
/*
 * Compute, into 'deliver_bitmask' (max_apic_words words, one bit per
 * initial APIC ID), the set of local APICs addressed by (dest, dest_mode).
 */
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode)
{
    APICCommonState *apic;
    int i;

    memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t));

    /*
     * x2APIC broadcast is delivered to all x2APIC CPUs regardless of
     * destination mode. In case the destination mode is physical, it is
     * broadcasted to all xAPIC CPUs too. Otherwise, if the destination
     * mode is logical, we need to continue checking if xAPIC CPUs accepts
     * the interrupt.
     */
    if (dest == 0xffffffff) {
        if (dest_mode == APIC_DESTMODE_PHYSICAL) {
            memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t));
            return;
        } else {
            apic_get_broadcast_bitmask(deliver_bitmask, true);
        }
    }

    if (dest_mode == APIC_DESTMODE_PHYSICAL) {
        apic_find_dest(deliver_bitmask, dest);
        /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */
        if (dest == 0xff) {
            apic_get_broadcast_bitmask(deliver_bitmask, false);
        }
    } else {
        /* XXX: logical mode */
        for (i = 0; i < max_apics; i++) {
            apic = local_apics[i];
            if (apic) {
                /* x2APIC logical mode */
                if (apic->apicbase & MSR_IA32_APICBASE_EXTD) {
                    /* Cluster (high 16 bits) must match exactly and at
                     * least one CPU bit (low 16 bits) must be shared. */
                    if ((dest >> 16) == (apic->extended_log_dest >> 16) &&
                        (dest & apic->extended_log_dest & 0xffff)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                    continue;
                }

                /* xAPIC logical mode */
                /* NOTE(review): this truncation persists for later loop
                 * iterations, so a subsequent x2APIC CPU compares against
                 * only the low 8 bits of 'dest' — confirm intended. */
                dest = (uint8_t)dest;
                if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) {
                    if (dest & apic->log_dest) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) {
                    /*
                     * In cluster model of xAPIC logical mode IPI, 4 higher
                     * bits are used as cluster address, 4 lower bits are
                     * the bitmask for local APICs in the cluster. The IPI
                     * is delivered to an APIC if the cluster address
                     * matches and the APIC's address bit in the cluster is
                     * set in bitmask of destination ID in IPI.
                     *
                     * The cluster address ranges from 0 - 14, the cluster
                     * address 15 (0xf) is the broadcast address to all
                     * clusters.
                     */
                    if ((dest & 0xf0) == 0xf0 ||
                        (dest & 0xf0) == (apic->log_dest & 0xf0)) {
                        if (dest & apic->log_dest & 0x0f) {
                            apic_set_bit(deliver_bitmask, i);
                        }
                    }
                }
            }
        }
    }
}
637
/* Record a Startup IPI's vector and kick the target CPU; the vCPU thread
 * consumes it in apic_sipi(). */
static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}
643
/* Handle CPU_INTERRUPT_SIPI on the vCPU: if the CPU is waiting for a
 * SIPI, start it at the recorded vector. */
void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi) {
        return;
    }
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}
655
/*
 * Deliver an IPI programmed through the ICR. 'dest_shorthand' is the
 * 2-bit ICR field: 0 = use destination, 1 = self, 2 = all including
 * self, 3 = all excluding self.
 */
static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode, uint8_t dest_shorthand)
{
    APICCommonState *s = APIC(dev);
    APICCommonState *apic_iter;
    uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t);
    g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words);
    uint32_t current_apic_id;

    /* "Self" is the full initial ID in x2APIC mode, the 8-bit ID otherwise. */
    if (is_x2apic_mode(dev)) {
        current_apic_id = s->initial_apic_id;
    } else {
        current_apic_id = s->id;
    }

    switch (dest_shorthand) {
    case 0: /* no shorthand: resolve the destination field */
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1: /* self */
        memset(deliver_bitmask, 0x00, deliver_bitmask_size);
        apic_set_bit(deliver_bitmask, current_apic_id);
        break;
    case 2: /* all including self */
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        break;
    case 3: /* all excluding self */
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        apic_reset_bit(deliver_bitmask, current_apic_id);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            /* ICR bit 15 = trigger mode, bit 14 = level. */
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            /* INIT level de-assert only resets arbitration IDs. */
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        /* Startup IPI: latch the vector for the target vCPU thread. */
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}
710
apic_check_pic(APICCommonState * s)711 static bool apic_check_pic(APICCommonState *s)
712 {
713 DeviceState *dev = (DeviceState *)s;
714
715 if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
716 return false;
717 }
718 apic_deliver_pic_intr(dev, 1);
719 return true;
720 }
721
/*
 * Interrupt-acknowledge: return the vector to service, or -1 when nothing
 * is pending / the 8259 should be serviced instead, or the spurious
 * vector when the pending interrupt is blocked by the processor priority.
 * On success the vector is moved from IRR to ISR.
 */
int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    int intno;

    /* if the APIC is installed or enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        /* Pending but masked by PPR: report the spurious vector. */
        return s->spurious_vec & 0xff;
    }
    /* Acknowledge: move the vector from IRR to ISR. */
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}
755
/*
 * Whether the CPU takes 8259 PIC interrupts: yes (if a PIC exists) when
 * the APIC is hardware-disabled or its LINT0 entry is unmasked.
 */
int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    uint32_t lvt0;

    if (!s) {
        return -1;
    }

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) ||
        !(lvt0 & APIC_LVT_MASKED)) {
        return isa_pic != NULL;
    }

    return 0;
}
772
/* Re-arm or cancel the LAPIC timer; apic_next_timer() computes
 * s->next_time and returns false when there is nothing left to fire. */
static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (!apic_next_timer(s, current_time)) {
        timer_del(s->timer);
    } else {
        timer_mod(s->timer, s->next_time);
    }
}
781
apic_timer(void * opaque)782 static void apic_timer(void *opaque)
783 {
784 APICCommonState *s = opaque;
785
786 apic_local_deliver(s, APIC_LVT_TIMER);
787 apic_timer_update(s, s->next_time);
788 }
789
/*
 * Read APIC register 'index' (MMIO offset >> 4, or x2APIC MSR offset) of
 * the current CPU's APIC into *value. Returns 0 on success, -1 when there
 * is no current APIC or the register is reserved/invalid in the current
 * mode (*value is left untouched in the no-APIC case).
 */
static int apic_register_read(int index, uint64_t *value)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int ret = 0;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    switch(index) {
    case 0x02: /* id */
        if (is_x2apic_mode(dev)) {
            val = s->initial_apic_id;
        } else {
            val = s->id << 24;
        }
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08: /* task priority (TPR) */
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09: /* arbitration priority (stub) */
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b: /* EOI is write-only; reads as zero */
        val = 0;
        break;
    case 0x0d: /* logical destination */
        if (is_x2apic_mode(dev)) {
            val = s->extended_log_dest;
        } else {
            val = s->log_dest << 24;
        }
        break;
    case 0x0e: /* destination format; reserved in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            val = 0;
            ret = -1;
        } else {
            val = (s->dest_mode << 28) | 0xfffffff;
        }
        break;
    case 0x0f: /* spurious interrupt vector */
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17: /* in-service register (ISR) */
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f: /* trigger mode register (TMR) */
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27: /* interrupt request register (IRR) */
        val = s->irr[index & 7];
        break;
    case 0x28: /* error status */
        val = s->esr;
        break;
    case 0x30: /* ICR low */
    case 0x31: /* ICR high */
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37: /* LVT entries */
        val = s->lvt[index - 0x32];
        break;
    case 0x38: /* timer initial count */
        val = s->initial_count;
        break;
    case 0x39: /* timer current count */
        val = apic_get_current_count(s);
        break;
    case 0x3e: /* timer divide configuration */
        val = s->divide_conf;
        break;
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        val = 0;
        ret = -1;
        break;
    }

    trace_apic_register_read(index, val);
    *value = val;
    return ret;
}
888
/*
 * MMIO read handler for the APIC page. Sub-dword accesses and reads with
 * no current APIC or an invalid register read as zero.
 */
static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val;
    int index;

    if (size < 4) {
        return 0;
    }

    index = (addr >> 4) & 0xff;
    /* apic_register_read() does not set *value when there is no current
     * APIC; returning 'val' unconditionally would read an uninitialized
     * stack slot in that case, so read as zero on failure instead. */
    if (apic_register_read(index, &val) < 0) {
        return 0;
    }

    return val;
}
903
/* x2APIC MSR read: only valid when a current APIC exists and it is in
 * x2APIC mode. Returns -1 otherwise (caller raises #GP). */
int apic_msr_read(int index, uint64_t *val)
{
    DeviceState *dev = cpu_get_current_apic();

    if (!dev) {
        return -1;
    }
    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_read(index, val);
}
919
/* Decode an MSI message into destination/vector/mode fields and deliver
 * the resulting interrupt. */
static void apic_send_msi(MSIMessage *msi)
{
    uint64_t addr = msi->address;
    uint32_t data = msi->data;
    uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;

    /*
     * The higher 3 bytes of destination id is stored in higher word of
     * msi address. See x86_iommu_irq_to_msi_message()
     */
    dest |= addr >> 32;

    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}
937
/*
 * Write APIC register 'index' of the current CPU's APIC. Returns 0 on
 * success, -1 when there is no current APIC or the access is invalid in
 * the current mode (unknown indices additionally latch an ESR error).
 */
static int apic_register_write(int index, uint64_t val)
{
    DeviceState *dev;
    APICCommonState *s;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    trace_apic_register_write(index, val);

    switch(index) {
    case 0x02: /* APIC ID: read-only in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->id = (val >> 24);
        break;
    case 0x03: /* version: read-only, write ignored */
        break;
    case 0x08: /* task priority (TPR) */
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09: /* arbitration/processor priority: read-only */
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d: /* logical destination: read-only in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->log_dest = val >> 24;
        break;
    case 0x0e: /* destination format: reserved in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->dest_mode = val >> 28;
        break;
    case 0x0f: /* spurious interrupt vector (incl. soft-enable bit) */
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17: /* ISR/TMR/IRR/ESR: read-only, writes ignored */
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30: { /* ICR low (full 64-bit ICR in x2APIC); triggers the IPI */
        uint32_t dest;

        s->icr[0] = val;
        if (is_x2apic_mode(dev)) {
            s->icr[1] = val >> 32;
            dest = s->icr[1];
        } else {
            dest = (s->icr[1] >> 24) & 0xff;
        }

        /* ICR fields: bit 11 dest mode, bits 8-10 delivery mode, bits 0-7
         * vector, bit 15 trigger mode, bits 18-19 shorthand. */
        apic_deliver(dev, dest, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3);
        break;
    }
    case 0x31: /* ICR high: reserved in x2APIC mode */
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->icr[1] = val;
        break;
    case 0x32 ... 0x37: /* LVT entries */
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                /* Timer LVT changes may re-arm or stop the timer. */
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                /* Unmasking LINT0 may let a pending PIC interrupt in. */
                apic_update_irq(s);
            }
        }
        break;
    case 0x38: /* timer initial count: also (re)starts the timer */
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39: /* timer current count: read-only */
        break;
    case 0x3e: /* timer divide configuration */
        {
            int v;
            s->divide_conf = val & 0xb;
            /* The divider exponent is encoded in bits 0, 1 and 3. */
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    case 0x3f: { /* SELF IPI: x2APIC only */
        int vector = val & 0xff;

        if (!is_x2apic_mode(dev)) {
            return -1;
        }

        /*
         * Self IPI is identical to IPI with
         * - Destination shorthand: 1 (Self)
         * - Trigger mode: 0 (Edge)
         * - Delivery mode: 0 (Fixed)
         */
        apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1);

        break;
    }
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        return -1;
    }

    return 0;
}
1071
/*
 * MMIO write handler for the APIC page. Writes outside the register
 * window (or at offset 0) are interpreted as MSI messages; sub-dword
 * accesses are dropped.
 */
static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    int index = (addr >> 4) & 0xff;

    if (size < 4) {
        return;
    }

    if (addr > 0xfff || !index) {
        /*
         * MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on PCI bus
         * APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa.
         */
        MSIMessage msi = { .address = addr, .data = val };
        apic_send_msi(&msi);
        return;
    }

    apic_register_write(index, val);
}
1096
/* x2APIC MSR write: only valid when a current APIC exists and it is in
 * x2APIC mode. Returns -1 otherwise (caller raises #GP). */
int apic_msr_write(int index, uint64_t val)
{
    DeviceState *dev = cpu_get_current_apic();

    if (!dev) {
        return -1;
    }
    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_write(index, val);
}
1112
/* Migration pre-save hook: fold the guest-visible VAPIC TPR back into
 * the device state before it is serialized. */
static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}
1117
/* Migration post-load hook: re-arm the LAPIC timer from the migrated
 * deadline, or cancel it when none was pending (-1). */
static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry == -1) {
        timer_del(s->timer);
    } else {
        timer_mod(s->timer, s->timer_expiry);
    }
}
1126
/*
 * MMIO ops for the combined APIC/MSI region. 1- and 2-byte accesses are
 * accepted by the region but rejected in the handlers themselves
 * (apic_mem_read returns 0, apic_mem_write drops them).
 */
static const MemoryRegionOps apic_io_ops = {
    .read = apic_mem_read,
    .write = apic_mem_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1136
/*
 * Realize the userspace APIC device: set up its MMIO region and timer and
 * register it in the global local_apics[] table.
 */
static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC(dev);

    if (kvm_enabled()) {
        warn_report("Userspace local APIC is deprecated for KVM.");
        warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
    }

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    /*
     * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
     * write back to apic-msi. As such mark the apic-msi region re-entrancy
     * safe.
     */
    s->io_memory.disable_reentrancy_guard = true;

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);

    /*
     * The --machine none does not call apic_set_max_apic_id before creating
     * apic, so we need to call it here and set it to 1 which is the max cpus
     * in machine none.
     */
    if (!local_apics) {
        apic_set_max_apic_id(1);
    }
    /* Register this APIC so IPI/broadcast delivery can find it. */
    local_apics[s->initial_apic_id] = s;

    msi_nonbroken = true;
}
1170
/* Tear down the userspace APIC: remove it from the global registry and
 * release its timer. */
static void apic_unrealize(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    local_apics[s->initial_apic_id] = NULL;
    timer_free(s->timer);
}
1178
/* Install the emulated (userspace) APIC implementations of the common
 * APIC class callbacks. */
static void apic_class_init(ObjectClass *klass, const void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->unrealize = apic_unrealize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
    k->send_msi = apic_send_msi;
}
1194
/* QOM type description: TYPE_APIC derives from TYPE_APIC_COMMON and
 * reuses its instance struct. */
static const TypeInfo apic_info = {
    .name = TYPE_APIC,
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};
1201
apic_register_types(void)1202 static void apic_register_types(void)
1203 {
1204 type_register_static(&apic_info);
1205 }
1206
1207 type_init(apic_register_types)
1208