/*
 *  APIC support
 *
 *  Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/intc/ioapic.h"
#include "hw/intc/i8259.h"
#include "hw/intc/kvm_irqcount.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"
#include "qapi/error.h"
#include "qom/object.h"

#define MAX_APICS 255
#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

#define TYPE_APIC "apic"
/* This is reusing the APICCommonState typedef from APIC_COMMON */
DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
                         TYPE_APIC)

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

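        /*
         * Write the snapshot back to the guest's vAPIC page. A plain
         * ISR/IRR sync only updates the isr/zero/irr bytes; a full
         * SYNC_TO_VAPIC rewrites the whole VAPICState, including tpr
         * and enabled.
         */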
        address_space_write_rom(&address_space_memory,
                                s->vapic_paddr + start,
                                MEMTXATTRS_UNSPECIFIED,
                                ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for (__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for (__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for (i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

bool is_x2apic_mode(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    return s->apicbase & MSR_IA32_APICBASE_EXTD;
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

int apic_get_highest_priority_irr(DeviceState *dev)
{
    APICCommonState *s;

    if (!dev) {
        /* no interrupts */
        return -1;
    }
    s = APIC_COMMON(dev);
    return get_highest_priority_int(s->irr);
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return 0;
    }

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;
    DeviceState *dev = (DeviceState *)s;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    kvm_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == local_apics[dest]->id */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                apic_set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for (i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        apic_set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = APIC(dev);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        apic_set_bit(deliver_bitmask, s->id);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        apic_reset_bit(deliver_bitmask, s->id);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
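            /*
             * An INIT level de-assert (level == 0, trigger mode == 1) only
             * latched the arbitration IDs above; an ordinary INIT IPI falls
             * through to apic_bus_deliver() below.
             */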
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    DeviceState *dev = (DeviceState *)s;

    if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(dev, 1);
    return true;
}

int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return isa_pic != NULL;

    return 0;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static int apic_register_read(int index, uint64_t *value)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int ret = 0;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    switch (index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = (s->dest_mode << 28) | 0xfffffff;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        val = 0;
        ret = -1;
        break;
    }

    trace_apic_register_read(index, val);
    *value = val;
    return ret;
}

static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val;
    int index;

    if (size < 4) {
        return 0;
    }

    index = (addr >> 4) & 0xff;
    apic_register_read(index, &val);

    return val;
}

int apic_msr_read(int index, uint64_t *val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_read(index, val);
}

static void apic_send_msi(MSIMessage *msi)
{
    uint64_t addr = msi->address;
    uint32_t data = msi->data;
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static int apic_register_write(int index, uint64_t val)
{
    DeviceState *dev;
    APICCommonState *s;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    trace_apic_register_write(index, val);

    switch (index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        return -1;
    }

    return 0;
}

static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    int index = (addr >> 4) & 0xff;

    if (size < 4) {
        return;
    }

    if (addr > 0xfff || !index) {
        /*
         * MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on PCI bus
         * APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa.
         */
        MSIMessage msi = { .address = addr, .data = val };
        apic_send_msi(&msi);
        return;
    }

    apic_register_write(index, val);
}

int apic_msr_write(int index, uint64_t val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_write(index, val);
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .read = apic_mem_read,
    .write = apic_mem_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC(dev);

    if (s->id >= MAX_APICS) {
        error_setg(errp, "%s initialization failed. APIC ID %d is invalid",
                   object_get_typename(OBJECT(dev)), s->id);
        return;
    }

    if (kvm_enabled()) {
        warn_report("Userspace local APIC is deprecated for KVM.");
        warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
    }

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    /*
     * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
     * write back to apic-msi. As such mark the apic-msi region re-entrancy
     * safe.
     */
    s->io_memory.disable_reentrancy_guard = true;

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
    local_apics[s->id] = s;

    msi_nonbroken = true;
}

static void apic_unrealize(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    timer_free(s->timer);
    local_apics[s->id] = NULL;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->unrealize = apic_unrealize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
    k->send_msi = apic_send_msi;
}

static const TypeInfo apic_info = {
    .name          = TYPE_APIC,
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)