Lines matching refs:apic (references to the identifier "apic") in arch/x86/kvm/lapic.c

30 #include <asm/apic.h>
77 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
78 static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
80 static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
82 apic_set_reg(apic->regs, reg_off, val);
85 static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
87 return apic_get_reg64(apic->regs, reg);
90 static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
93 apic_set_reg64(apic->regs, reg, val);
98 struct kvm_lapic *apic = vcpu->arch.apic;
100 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
101 apic_test_vector(vector, apic->regs + APIC_IRR);
110 static inline int apic_enabled(struct kvm_lapic *apic)
112 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
122 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
124 return apic->vcpu->vcpu_id;
194 struct kvm_lapic *apic = vcpu->arch.apic;
195 u32 x2apic_id = kvm_x2apic_id(apic);
196 u32 xapic_id = kvm_xapic_id(apic);
222 if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
241 if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
242 new->phys_map[x2apic_id] = apic;
244 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
245 new->phys_map[xapic_id] = apic;
252 if (apic_x2apic_mode(apic))
260 new->phys_map[physical_id] = apic;
269 struct kvm_lapic *apic = vcpu->arch.apic;
278 if (!kvm_apic_sw_enabled(apic))
281 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
285 if (apic_x2apic_mode(apic)) {
289 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
313 if (apic_x2apic_mode(apic))
329 cluster[ldr] = apic;
388 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
443 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
456 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
460 kvm_lapic_set_reg(apic, APIC_SPIV, val);
462 if (enabled != apic->sw_enabled) {
463 apic->sw_enabled = enabled;
469 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
474 kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
475 kvm_xen_sw_enable_lapic(apic->vcpu);
479 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
481 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
482 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
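
In xAPIC mode the 8-bit APIC ID lives in bits 31:24 of the ID register, which is why the write above shifts id left by 24. A minimal illustration; the helper names are ours, not KVM's:

    #include <assert.h>
    #include <stdint.h>

    /* xAPIC ID register layout: ID in bits 31:24, rest reserved. */
    static uint32_t xapic_id_to_reg(uint8_t id)   { return (uint32_t)id << 24; }
    static uint8_t  xapic_reg_to_id(uint32_t reg) { return reg >> 24; }

    int main(void)
    {
            assert(xapic_reg_to_id(xapic_id_to_reg(3)) == 3);
            return 0;
    }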
485 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
487 kvm_lapic_set_reg(apic, APIC_LDR, id);
488 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
491 static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
493 kvm_lapic_set_reg(apic, APIC_DFR, val);
494 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
497 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
501 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
503 kvm_lapic_set_reg(apic, APIC_ID, id);
504 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
505 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
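
The intervening source line that derives ldr does not reference the apic variable, so it is not matched above. Per the Intel SDM, the x2APIC logical ID is built from the x2APIC ID as a 16-bit cluster number plus a one-hot member bit. A standalone sketch of that derivation (helper name is ours):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* x2APIC logical ID: bits 31:16 = cluster (id >> 4),
     * bits 15:0 = one-hot member position (id & 0xf). */
    static uint32_t x2apic_ldr(uint32_t id)
    {
            return ((id >> 4) << 16) | (1u << (id & 0xf));
    }

    int main(void)
    {
            assert(x2apic_ldr(0)  == 0x00000001);
            assert(x2apic_ldr(17) == 0x00010002);      /* cluster 1, bit 1 */
            printf("ldr(35) = %#x\n", x2apic_ldr(35)); /* 0x20008 */
            return 0;
    }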
508 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
510 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
513 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
515 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
518 static inline int apic_lvtt_period(struct kvm_lapic *apic)
520 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
523 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
525 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
533 static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
535 return apic->nr_lvt_entries > lvt_index;
545 struct kvm_lapic *apic = vcpu->arch.apic;
551 v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
563 kvm_lapic_set_reg(apic, APIC_LVR, v);
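
Bits 23:16 of the version register hold the highest LVT index, hence the (nr_lvt_entries - 1) << 16 above. A small sketch; 0x14 is used only as a plausible version byte, not a value taken from this file:

    #include <assert.h>
    #include <stdint.h>

    /* LVR layout: bits 7:0 = version, bits 23:16 = max LVT index. */
    static uint32_t make_lvr(uint8_t version, unsigned nr_lvt_entries)
    {
            return version | ((uint32_t)(nr_lvt_entries - 1) << 16);
    }

    int main(void)
    {
            assert(((make_lvr(0x14, 6) >> 16) & 0xff) == 5);
            return 0;
    }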
569 struct kvm_lapic *apic = vcpu->arch.apic;
572 if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
576 for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
577 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
579 apic->nr_lvt_entries = nr_lvt_entries;
649 struct kvm_lapic *apic = vcpu->arch.apic;
650 bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
652 if (unlikely(!apic->apicv_active && irr_updated))
653 apic->irr_pending = true;
658 static inline int apic_search_irr(struct kvm_lapic *apic)
660 return apic_find_highest_vector(apic->regs + APIC_IRR);
663 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
671 if (!apic->irr_pending)
674 result = apic_search_irr(apic);
680 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
682 if (unlikely(apic->apicv_active)) {
683 apic_clear_vector(vec, apic->regs + APIC_IRR);
685 apic->irr_pending = false;
686 apic_clear_vector(vec, apic->regs + APIC_IRR);
687 if (apic_search_irr(apic) != -1)
688 apic->irr_pending = true;
694 apic_clear_irr(vec, vcpu->arch.apic);
698 static void *apic_vector_to_isr(int vec, struct kvm_lapic *apic)
700 return apic->regs + APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(vec);
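
IRR, ISR, and TMR are each 256-bit bitmaps stored as eight 32-bit registers at 16-byte strides, so vector v maps to register offset (v >> 5) << 4, bit v & 0x1f, and "highest vector" is a top-down scan of those words. A self-contained model of both, assuming the little-endian word layout used on x86:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Vector v -> 16-byte-aligned register (v / 32) and bit (v % 32). */
    static unsigned vec_reg_offset(unsigned v) { return (v >> 5) << 4; }
    static unsigned vec_bit(unsigned v)        { return v & 0x1f; }

    /* Highest set vector in the 256-bit bitmap, -1 if none. */
    static int find_highest_vector(const uint8_t *regs)
    {
            for (int word = 7; word >= 0; word--) {
                    uint32_t w;

                    memcpy(&w, regs + (word << 4), sizeof(w));
                    if (w)
                            return word * 32 + 31 - __builtin_clz(w);
            }
            return -1;
    }

    int main(void)
    {
            uint8_t regs[8 * 16] = { 0 };
            uint32_t w = 1u << vec_bit(0x61);

            memcpy(regs + vec_reg_offset(0x61), &w, sizeof(w));
            printf("%d\n", find_highest_vector(regs)); /* prints 97 (0x61) */
            return 0;
    }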
703 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
706 apic_vector_to_isr(vec, apic)))
714 if (unlikely(apic->apicv_active))
715 kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
717 ++apic->isr_count;
718 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
724 apic->highest_isr_cache = vec;
728 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
736 if (!apic->isr_count)
738 if (likely(apic->highest_isr_cache != -1))
739 return apic->highest_isr_cache;
741 result = apic_find_highest_vector(apic->regs + APIC_ISR);
747 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
750 apic_vector_to_isr(vec, apic)))
760 if (unlikely(apic->apicv_active))
761 kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
763 --apic->isr_count;
764 BUG_ON(apic->isr_count < 0);
765 apic->highest_isr_cache = -1;
771 struct kvm_lapic *apic = vcpu->arch.apic;
773 if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
776 kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
787 return apic_find_highest_irr(vcpu->arch.apic);
791 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
798 struct kvm_lapic *apic = vcpu->arch.apic;
800 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
904 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
908 highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
910 highest_irr = apic_find_highest_irr(apic);
916 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
921 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
922 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
923 isr = apic_find_highest_isr(apic);
933 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
938 static void apic_update_ppr(struct kvm_lapic *apic)
942 if (__apic_update_ppr(apic, &ppr) &&
943 apic_has_interrupt_for_ppr(apic, ppr) != -1)
944 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
949 apic_update_ppr(vcpu->arch.apic);
953 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
955 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
956 apic_update_ppr(apic);
959 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
961 return mda == (apic_x2apic_mode(apic) ?
965 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
967 if (kvm_apic_broadcast(apic, mda))
979 if (apic_x2apic_mode(apic) || mda > 0xff)
980 return mda == kvm_x2apic_id(apic);
982 return mda == kvm_xapic_id(apic);
985 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
989 if (kvm_apic_broadcast(apic, mda))
992 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
994 if (apic_x2apic_mode(apic))
1000 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
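
The switch above selects between the two xAPIC destination format models: flat mode matches when the 8-bit logical IDs share any bit, while cluster mode requires an equal high nibble and a nonzero intersection of the low nibbles. A hedged sketch of just that comparison, with the 8-bit logical ID already extracted from LDR bits 31:24:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool match_flat(uint8_t ldr, uint8_t mda)
    {
            return (ldr & mda) != 0;
    }

    static bool match_cluster(uint8_t ldr, uint8_t mda)
    {
            return (ldr >> 4) == (mda >> 4) && (ldr & mda & 0xf) != 0;
    }

    int main(void)
    {
            assert(match_flat(0x02, 0x03));     /* shared bit 1 */
            assert(match_cluster(0x21, 0x2f));  /* cluster 2, member 0 */
            assert(!match_cluster(0x21, 0x31)); /* different cluster */
            return 0;
    }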
1042 struct kvm_lapic *target = vcpu->arch.apic;
1263 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1268 struct kvm_vcpu *vcpu = apic->vcpu;
1281 if (unlikely(!apic_enabled(apic)))
1291 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1293 apic_set_vector(vector, apic->regs + APIC_TMR);
1295 apic_clear_vector(vector, apic->regs + APIC_TMR);
1298 kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
1326 apic->pending_events = (1UL << KVM_APIC_INIT);
1334 apic->sipi_vector = vector;
1337 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1407 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1409 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1412 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1417 if (!kvm_ioapic_handles_vector(apic, vector))
1425 if (apic->vcpu->arch.highest_stale_pending_ioapic_eoi == vector)
1426 kvm_make_request(KVM_REQ_SCAN_IOAPIC, apic->vcpu);
1429 if (irqchip_split(apic->vcpu->kvm)) {
1430 apic->vcpu->arch.pending_ioapic_eoi = vector;
1431 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1436 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1441 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1445 static int apic_set_eoi(struct kvm_lapic *apic)
1447 int vector = apic_find_highest_isr(apic);
1449 trace_kvm_eoi(apic, vector);
1458 apic_clear_isr(vector, apic);
1459 apic_update_ppr(apic);
1461 if (kvm_hv_synic_has_vector(apic->vcpu, vector))
1462 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1464 kvm_ioapic_send_eoi(apic, vector);
1465 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1475 struct kvm_lapic *apic = vcpu->arch.apic;
1477 trace_kvm_eoi(apic, vector);
1479 kvm_ioapic_send_eoi(apic, vector);
1480 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1484 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1498 if (apic_x2apic_mode(apic))
1505 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1509 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1514 ASSERT(apic != NULL);
1517 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1518 apic->lapic_timer.period == 0)
1522 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1526 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1527 return div64_u64(ns, (apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1528 apic->divide_count));
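
The current count is the remaining time, reduced modulo the period, divided by the cost of one tick (APIC bus cycle time times the divide value). A worked sketch assuming the 1 ns bus cycle (1 GHz APIC bus) that KVM defaults to:

    #include <stdint.h>
    #include <stdio.h>

    /* TMCCT = (remaining_ns mod period_ns) / (bus_cycle_ns * divide_count). */
    static uint32_t tmcct(uint64_t remaining_ns, uint64_t period_ns,
                          uint64_t bus_cycle_ns, uint32_t divide_count)
    {
            uint64_t ns = remaining_ns % period_ns;

            return (uint32_t)(ns / (bus_cycle_ns * divide_count));
    }

    int main(void)
    {
            /* 1 GHz bus, divide-by-16, 1 ms left of a 10 ms period. */
            printf("%u ticks\n", tmcct(1000000, 10000000, 1, 16)); /* 62500 */
            return 0;
    }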
1531 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1533 struct kvm_vcpu *vcpu = apic->vcpu;
1541 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1543 if (apic->vcpu->arch.tpr_access_reporting)
1544 __report_tpr_access(apic, write);
1547 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1559 if (apic_lvtt_tscdeadline(apic))
1562 val = apic_get_tmcct(apic);
1565 apic_update_ppr(apic);
1566 val = kvm_lapic_get_reg(apic, offset);
1569 report_tpr_access(apic, false);
1572 val = kvm_lapic_get_reg(apic, offset);
1588 u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
1613 if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1617 if (!apic_x2apic_mode(apic))
1626 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1636 WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);
1642 !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
1645 result = __apic_read(apic, offset & ~0xf);
1663 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1665 return addr >= apic->base_address &&
1666 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1672 struct kvm_lapic *apic = to_lapic(this);
1673 u32 offset = address - apic->base_address;
1675 if (!apic_mmio_in_range(apic, address))
1678 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1687 kvm_lapic_reg_read(apic, offset, len, data);
1692 static void update_divide_count(struct kvm_lapic *apic)
1696 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1699 apic->divide_count = 0x1 << (tmp2 & 0x7);
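
The tmp1/tmp2 lines between the TDCR read and the final shift do not reference apic and so are absent from this listing; the divide configuration is encoded in TDCR bits 0, 1, and 3. A self-contained decode matching the SDM's divide-configuration table:

    #include <assert.h>
    #include <stdio.h>

    /* Decode the APIC timer divide configuration (TDCR bits 0,1,3):
     * 0b1011 -> divide by 1, 0b0000 -> divide by 2, and so on. */
    static unsigned divide_count(unsigned tdcr)
    {
            unsigned tmp1 = tdcr & 0xf;
            unsigned tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;

            return 1u << (tmp2 & 0x7);
    }

    int main(void)
    {
            assert(divide_count(0xb) == 1);
            assert(divide_count(0x0) == 2);
            assert(divide_count(0x8) == 32);
            printf("TDCR=0x3 -> divide by %u\n", divide_count(0x3)); /* 16 */
            return 0;
    }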
1702 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1709 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1712 if (apic->lapic_timer.period < min_period) {
1716 apic->vcpu->vcpu_id,
1717 apic->lapic_timer.period, min_period);
1718 apic->lapic_timer.period = min_period;
1723 static void cancel_hv_timer(struct kvm_lapic *apic);
1725 static void cancel_apic_timer(struct kvm_lapic *apic)
1727 hrtimer_cancel(&apic->lapic_timer.timer);
1729 if (apic->lapic_timer.hv_timer_in_use)
1730 cancel_hv_timer(apic);
1732 atomic_set(&apic->lapic_timer.pending, 0);
1735 static void apic_update_lvtt(struct kvm_lapic *apic)
1737 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1738 apic->lapic_timer.timer_mode_mask;
1740 if (apic->lapic_timer.timer_mode != timer_mode) {
1741 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1743 cancel_apic_timer(apic);
1744 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1745 apic->lapic_timer.period = 0;
1746 apic->lapic_timer.tscdeadline = 0;
1748 apic->lapic_timer.timer_mode = timer_mode;
1749 limit_periodic_timer_frequency(apic);
1760 struct kvm_lapic *apic = vcpu->arch.apic;
1768 if (apic->guest_apic_protected)
1771 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1772 if (kvm_apic_hw_enabled(apic)) {
1774 void *bitmap = apic->regs + APIC_ISR;
1776 if (apic->apicv_active)
1777 bitmap = apic->regs + APIC_IRR;
1787 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1808 struct kvm_lapic *apic = vcpu->arch.apic;
1809 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1831 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1836 struct kvm_lapic *apic = vcpu->arch.apic;
1839 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1840 apic->lapic_timer.expired_tscdeadline = 0;
1860 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1861 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1867 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1869 struct kvm_timer *ktimer = &apic->lapic_timer;
1871 kvm_apic_local_deliver(apic, APIC_LVTT);
1872 if (apic_lvtt_tscdeadline(apic)) {
1874 } else if (apic_lvtt_oneshot(apic)) {
1880 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1882 struct kvm_vcpu *vcpu = apic->vcpu;
1883 struct kvm_timer *ktimer = &apic->lapic_timer;
1885 if (atomic_read(&apic->lapic_timer.pending))
1888 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1891 if (!from_timer_fn && apic->apicv_active) {
1893 kvm_apic_inject_pending_timer_irqs(apic);
1897 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1905 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1906 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1908 kvm_apic_inject_pending_timer_irqs(apic);
1912 atomic_inc(&apic->lapic_timer.pending);
1918 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1920 struct kvm_timer *ktimer = &apic->lapic_timer;
1924 struct kvm_vcpu *vcpu = apic->vcpu;
1941 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1946 apic_timer_expired(apic, false);
1951 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1953 return (u64)tmict * apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1954 (u64)apic->divide_count;
1957 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1962 apic->lapic_timer.period =
1963 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1964 limit_periodic_timer_frequency(apic);
1967 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1973 apic->divide_count, old_divisor);
1975 apic->lapic_timer.tscdeadline +=
1976 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1977 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1978 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1981 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1988 apic->lapic_timer.period =
1989 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1991 if (!apic->lapic_timer.period) {
1992 apic->lapic_timer.tscdeadline = 0;
1996 limit_periodic_timer_frequency(apic);
1997 deadline = apic->lapic_timer.period;
1999 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2001 deadline = tmict_to_ns(apic,
2002 kvm_lapic_get_reg(apic, count_reg));
2004 if (apic_lvtt_period(apic))
2005 deadline = apic->lapic_timer.period;
2009 else if (unlikely(deadline > apic->lapic_timer.period)) {
2014 apic->vcpu->vcpu_id,
2016 kvm_lapic_get_reg(apic, count_reg),
2017 deadline, apic->lapic_timer.period);
2018 kvm_lapic_set_reg(apic, count_reg, 0);
2019 deadline = apic->lapic_timer.period;
2024 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2025 nsec_to_cycles(apic->vcpu, deadline);
2026 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2031 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2044 apic->lapic_timer.target_expiration =
2045 ktime_add_ns(apic->lapic_timer.target_expiration,
2046 apic->lapic_timer.period);
2047 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2048 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2049 nsec_to_cycles(apic->vcpu, delta);
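
Advancing a periodic timer adds one period to the target expiration and recomputes the TSC deadline from the new delta. Converting that delta to cycles at a fixed TSC frequency is cycles = ns * tsc_khz / 10^6; this is a simplified model, since KVM's nsec_to_cycles additionally applies the vCPU's TSC scaling ratio:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified ns -> TSC cycles at a fixed tsc_khz. */
    static uint64_t nsec_to_cycles_simple(uint64_t ns, uint64_t tsc_khz)
    {
            return ns * tsc_khz / 1000000;
    }

    int main(void)
    {
            /* 2.4 GHz TSC: 1 ms is 2.4M cycles. */
            printf("%llu\n",
                   (unsigned long long)nsec_to_cycles_simple(1000000, 2400000));
            return 0;
    }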
2052 static void start_sw_period(struct kvm_lapic *apic)
2054 if (!apic->lapic_timer.period)
2058 apic->lapic_timer.target_expiration)) {
2059 apic_timer_expired(apic, false);
2061 if (apic_lvtt_oneshot(apic))
2064 advance_periodic_target_expiration(apic);
2067 hrtimer_start(&apic->lapic_timer.timer,
2068 apic->lapic_timer.target_expiration,
2077 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2080 static void cancel_hv_timer(struct kvm_lapic *apic)
2083 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2084 kvm_x86_call(cancel_hv_timer)(apic->vcpu);
2085 apic->lapic_timer.hv_timer_in_use = false;
2088 static bool start_hv_timer(struct kvm_lapic *apic)
2090 struct kvm_timer *ktimer = &apic->lapic_timer;
2091 struct kvm_vcpu *vcpu = apic->vcpu;
2112 if (!apic_lvtt_period(apic)) {
2118 cancel_hv_timer(apic);
2120 apic_timer_expired(apic, false);
2121 cancel_hv_timer(apic);
2130 static void start_sw_timer(struct kvm_lapic *apic)
2132 struct kvm_timer *ktimer = &apic->lapic_timer;
2135 if (apic->lapic_timer.hv_timer_in_use)
2136 cancel_hv_timer(apic);
2137 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2140 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2141 start_sw_period(apic);
2142 else if (apic_lvtt_tscdeadline(apic))
2143 start_sw_tscdeadline(apic);
2144 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2147 static void restart_apic_timer(struct kvm_lapic *apic)
2151 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2154 if (!start_hv_timer(apic))
2155 start_sw_timer(apic);
2162 struct kvm_lapic *apic = vcpu->arch.apic;
2166 if (!apic->lapic_timer.hv_timer_in_use)
2169 apic_timer_expired(apic, false);
2170 cancel_hv_timer(apic);
2172 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2173 advance_periodic_target_expiration(apic);
2174 restart_apic_timer(apic);
2183 restart_apic_timer(vcpu->arch.apic);
2188 struct kvm_lapic *apic = vcpu->arch.apic;
2192 if (apic->lapic_timer.hv_timer_in_use)
2193 start_sw_timer(apic);
2199 struct kvm_lapic *apic = vcpu->arch.apic;
2201 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2202 restart_apic_timer(apic);
2205 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2207 atomic_set(&apic->lapic_timer.pending, 0);
2209 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2210 && !set_target_expiration(apic, count_reg))
2213 restart_apic_timer(apic);
2216 static void start_apic_timer(struct kvm_lapic *apic)
2218 __start_apic_timer(apic, APIC_TMICT);
2221 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2225 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2226 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2228 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2230 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2244 static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2252 if (!apic_x2apic_mode(apic)) {
2253 kvm_apic_set_xapic_id(apic, val >> 24);
2260 report_tpr_access(apic, true);
2261 apic_set_tpr(apic, val & 0xff);
2265 apic_set_eoi(apic);
2269 if (!apic_x2apic_mode(apic))
2270 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2276 if (!apic_x2apic_mode(apic))
2277 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2284 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2286 apic_set_spiv(apic, val & mask);
2290 for (i = 0; i < apic->nr_lvt_entries; i++) {
2291 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2292 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2294 apic_update_lvtt(apic);
2295 atomic_set(&apic->lapic_timer.pending, 0);
2301 WARN_ON_ONCE(apic_x2apic_mode(apic));
2305 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2306 kvm_lapic_set_reg(apic, APIC_ICR, val);
2309 if (apic_x2apic_mode(apic))
2312 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2316 apic_manage_nmi_watchdog(apic, val);
2324 if (!kvm_lapic_lvt_supported(apic, index)) {
2328 if (!kvm_apic_sw_enabled(apic))
2331 kvm_lapic_set_reg(apic, reg, val);
2336 if (!kvm_apic_sw_enabled(apic))
2338 val &= (apic_lvt_mask[LVT_TIMER] | apic->lapic_timer.timer_mode_mask);
2339 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2340 apic_update_lvtt(apic);
2344 if (apic_lvtt_tscdeadline(apic))
2347 cancel_apic_timer(apic);
2348 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2349 start_apic_timer(apic);
2353 uint32_t old_divisor = apic->divide_count;
2355 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2356 update_divide_count(apic);
2357 if (apic->divide_count != old_divisor &&
2358 apic->lapic_timer.period) {
2359 hrtimer_cancel(&apic->lapic_timer.timer);
2360 update_target_expiration(apic, old_divisor);
2361 restart_apic_timer(apic);
2366 if (apic_x2apic_mode(apic) && val != 0)
2375 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2378 kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2390 kvm_recalculate_apic_map(apic->vcpu->kvm);
2398 struct kvm_lapic *apic = to_lapic(this);
2399 unsigned int offset = address - apic->base_address;
2402 if (!apic_mmio_in_range(apic, address))
2405 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2423 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2430 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2436 int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
2451 kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
2453 kvm_lapic_set_reg(apic, APIC_ICR, data);
2454 kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
2456 kvm_lapic_set_reg64(apic, APIC_ICR, data);
2462 static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
2465 return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
2466 (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
2468 return kvm_lapic_get_reg64(apic, APIC_ICR);
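
In xAPIC mode the 64-bit ICR value is split across ICR (low word) and ICR2 (high word, destination field), while x2APIC exposes it as a single 64-bit register, which is why the read above stitches the two halves together. A minimal sketch of the join, with an arbitrary illustrative value:

    #include <assert.h>
    #include <stdint.h>

    /* xAPIC: ICR low 32 bits + ICR2 high 32 bits; x2APIC: one 64-bit MSR. */
    static uint64_t icr_join(uint32_t icr, uint32_t icr2)
    {
            return (uint64_t)icr | ((uint64_t)icr2 << 32);
    }

    int main(void)
    {
            uint64_t v = icr_join(0x000040f2, 0x03000000);

            assert((uint32_t)v == 0x000040f2);         /* command word   */
            assert((uint32_t)(v >> 32) == 0x03000000); /* destination ID */
            return 0;
    }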
2474 struct kvm_lapic *apic = vcpu->arch.apic;
2487 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2488 WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
2490 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2496 struct kvm_lapic *apic = vcpu->arch.apic;
2498 if (!vcpu->arch.apic) {
2503 hrtimer_cancel(&apic->lapic_timer.timer);
2508 if (!apic->sw_enabled)
2511 if (apic->regs)
2512 free_page((unsigned long)apic->regs);
2514 kfree(apic);
2524 struct kvm_lapic *apic = vcpu->arch.apic;
2526 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2529 return apic->lapic_timer.tscdeadline;
2534 struct kvm_lapic *apic = vcpu->arch.apic;
2536 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2539 hrtimer_cancel(&apic->lapic_timer.timer);
2540 apic->lapic_timer.tscdeadline = data;
2541 start_apic_timer(apic);
2546 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2553 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2561 struct kvm_lapic *apic = vcpu->arch.apic;
2568 if (!apic)
2574 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2580 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2586 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2588 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2596 apic->base_address = apic->vcpu->arch.apic_base &
2600 apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2601 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2634 struct kvm_lapic *apic = vcpu->arch.apic;
2649 apic->irr_pending = true;
2651 if (apic->apicv_active)
2652 apic->isr_count = 1;
2654 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2656 apic->highest_isr_cache = -1;
2720 struct kvm_lapic *apic = vcpu->arch.apic;
2740 if (!apic)
2743 /* Stop the timer in case it's a reset to an active apic */
2744 hrtimer_cancel(&apic->lapic_timer.timer);
2748 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2749 kvm_apic_set_version(apic->vcpu);
2751 for (i = 0; i < apic->nr_lvt_entries; i++)
2752 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2753 apic_update_lvtt(apic);
2756 kvm_lapic_set_reg(apic, APIC_LVT0,
2758 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2760 kvm_apic_set_dfr(apic, 0xffffffffU);
2761 apic_set_spiv(apic, 0xff);
2762 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2763 if (!apic_x2apic_mode(apic))
2764 kvm_apic_set_ldr(apic, 0);
2765 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2766 if (!apic_x2apic_mode(apic)) {
2767 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2768 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2770 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2772 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2773 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2775 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2776 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2777 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2780 update_divide_count(apic);
2781 atomic_set(&apic->lapic_timer.pending, 0);
2784 apic_update_ppr(apic);
2785 if (apic->apicv_active) {
2802 static bool lapic_is_periodic(struct kvm_lapic *apic)
2804 return apic_lvtt_period(apic);
2809 struct kvm_lapic *apic = vcpu->arch.apic;
2811 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2812 return atomic_read(&apic->lapic_timer.pending);
2817 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2819 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2823 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2828 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2830 guest_cpuid_is_intel_compatible(apic->vcpu))
2831 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2839 struct kvm_lapic *apic = vcpu->arch.apic;
2841 if (apic)
2842 kvm_apic_local_deliver(apic, APIC_LVT0);
2853 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2855 apic_timer_expired(apic, true);
2857 if (lapic_is_periodic(apic)) {
2858 advance_periodic_target_expiration(apic);
2867 struct kvm_lapic *apic;
2876 apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2877 if (!apic)
2880 vcpu->arch.apic = apic;
2883 apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
2885 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2886 if (!apic->regs) {
2887 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2891 apic->vcpu = vcpu;
2893 apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2895 hrtimer_setup(&apic->lapic_timer.timer, apic_timer_fn, CLOCK_MONOTONIC,
2898 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2906 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2919 apic->apicv_active = true;
2925 kfree(apic);
2926 vcpu->arch.apic = NULL;
2933 struct kvm_lapic *apic = vcpu->arch.apic;
2939 if (apic->guest_apic_protected)
2942 __apic_update_ppr(apic, &ppr);
2943 return apic_has_interrupt_for_ppr(apic, ppr);
2949 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2951 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2961 struct kvm_lapic *apic = vcpu->arch.apic;
2963 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2964 kvm_apic_inject_pending_timer_irqs(apic);
2965 atomic_set(&apic->lapic_timer.pending, 0);
2971 struct kvm_lapic *apic = vcpu->arch.apic;
2974 if (WARN_ON_ONCE(vector < 0 || !apic))
2984 apic_clear_irr(vector, apic);
2991 apic_update_ppr(apic);
2999 apic_set_isr(vector, apic);
3000 __apic_update_ppr(apic, &ppr);
3009 if (apic_x2apic_mode(vcpu->arch.apic)) {
3010 u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
3060 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
3066 apic_set_reg(s->regs, APIC_TMCCT, __apic_read(vcpu->arch.apic, APIC_TMCCT));
3073 struct kvm_lapic *apic = vcpu->arch.apic;
3079 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3086 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3088 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3092 apic_update_ppr(apic);
3093 cancel_apic_timer(apic);
3094 apic->lapic_timer.expired_tscdeadline = 0;
3095 apic_update_lvtt(apic);
3096 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3097 update_divide_count(apic);
3098 __start_apic_timer(apic, APIC_TMCCT);
3099 kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3101 if (apic->apicv_active) {
3103 kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
3125 timer = &vcpu->arch.apic->lapic_timer.timer;
3138 struct kvm_lapic *apic)
3156 vector = apic_set_eoi(apic);
3157 trace_kvm_pv_eoi(apic, vector);
3165 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3170 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3174 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3184 struct kvm_lapic *apic)
3188 apic->irr_pending ||
3190 apic->highest_isr_cache == -1 ||
3192 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3200 pv_eoi_set_pending(apic->vcpu);
3207 struct kvm_lapic *apic = vcpu->arch.apic;
3209 apic_sync_pv_eoi_to_guest(vcpu, apic);
3214 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3215 max_irr = apic_find_highest_irr(apic);
3218 max_isr = apic_find_highest_isr(apic);
3223 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3231 &vcpu->arch.apic->vapic_cache,
3239 vcpu->arch.apic->vapic_addr = vapic_addr;
3243 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3248 *data = kvm_x2apic_icr_read(apic);
3252 if (kvm_lapic_reg_read(apic, reg, 4, &low))
3260 static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3268 return kvm_x2apic_icr_write(apic, data);
3274 return kvm_lapic_reg_write(apic, reg, (u32)data);
3279 struct kvm_lapic *apic = vcpu->arch.apic;
3282 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3285 return kvm_lapic_msr_write(apic, reg, data);
3290 struct kvm_lapic *apic = vcpu->arch.apic;
3293 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3296 return kvm_lapic_msr_read(apic, reg, data);
3304 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3312 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3343 struct kvm_lapic *apic = vcpu->arch.apic;
3368 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3372 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3374 if (kvm_vcpu_is_bsp(apic->vcpu))
3379 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3383 sipi_vector = apic->sipi_vector;