/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
                bool line_status);

static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
                        struct kvm_ioapic *ioapic,
                        int trigger_mode,
                        int pin);

static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
        unsigned long result = 0;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
                          | (IOAPIC_VERSION_ID & 0xff));
                break;

        case IOAPIC_REG_APIC_ID:
        case IOAPIC_REG_ARB_ID:
                result = ((ioapic->id & 0xf) << 24);
                break;

        default:
                {
                        u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
                        u64 redir_content = ~0ULL;

                        if (redir_index < IOAPIC_NUM_PINS) {
                                u32 index = array_index_nospec(
                                        redir_index, IOAPIC_NUM_PINS);

                                redir_content = ioapic->redirtbl[index].bits;
                        }

                        result = (ioapic->ioregsel & 0x1) ?
                            (redir_content >> 32) & 0xffffffff :
                            redir_content & 0xffffffff;
                        break;
                }
        }

        return result;
}
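
/*
 * RTC EOI tracking.  The RTC interrupt is edge-triggered, but coalesced
 * RTC interrupts cause time drift in Windows guests, so userspace wants
 * to know whether a previous RTC interrupt is still awaiting its EOI.
 * rtc_status counts the outstanding EOIs and records which vCPUs still
 * owe one, and for which vector.
 */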
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
        ioapic->rtc_status.pending_eoi = 0;
        bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
        if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
                kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;

        e = &ioapic->redirtbl[RTC_GSI];
        if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                 e->fields.dest_id,
                                 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
                return;

        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
        old_val = test_bit(vcpu->vcpu_id, dest_map->map);

        if (new_val == old_val)
                return;

        if (new_val) {
                __set_bit(vcpu->vcpu_id, dest_map->map);
                dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
                ioapic->rtc_status.pending_eoi++;
        } else {
                __clear_bit(vcpu->vcpu_id, dest_map->map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __rtc_irq_eoi_tracking_restore_one(vcpu);
        spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        if (RTC_GSI >= IOAPIC_NUM_PINS)
                return;

        rtc_irq_eoi_tracking_reset(ioapic);
        kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
                __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
                        int vector)
{
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

        /* RTC special handling */
        if (test_bit(vcpu->vcpu_id, dest_map->map) &&
            (vector == dest_map->vectors[vcpu->vcpu_id]) &&
            (test_and_clear_bit(vcpu->vcpu_id,
                                ioapic->rtc_status.dest_map.map))) {
                --ioapic->rtc_status.pending_eoi;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
        if (ioapic->rtc_status.pending_eoi > 0)
                return true; /* coalesced */

        return false;
}

static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

        kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
                if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                         entry->fields.dest_id,
                                         entry->fields.dest_mode) ||
                    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
                        continue;

                /*
                 * If this vCPU no longer has a pending EOI for the vector
                 * in its LAPIC, update the RTC EOI tracking for it.
                 */
                rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
                break;
        }
}
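
/*
 * Update the pin's line state and deliver the interrupt if needed.
 * Returns 0 when the interrupt is reported as coalesced; a non-zero
 * return means the line was deasserted, the interrupt was delivered,
 * or delivery was refused (ioapic_service() returns -1 for a masked
 * pin or a pending Remote IRR).
 */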
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
                int irq_level, bool line_status)
{
        union kvm_ioapic_redirect_entry entry;
        u32 mask = 1 << irq;
        u32 old_irr;
        int edge, ret;

        entry = ioapic->redirtbl[irq];
        edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

        if (!irq_level) {
                ioapic->irr &= ~mask;
                ret = 1;
                goto out;
        }

        /*
         * AMD SVM AVIC accelerates the EOI write iff the interrupt is
         * edge-triggered, in which case the in-kernel IOAPIC will not be
         * able to receive the EOI.  In this case, we do a lazy update of
         * the pending EOI when trying to set the IOAPIC irq.
         */
        if (edge && kvm_apicv_activated(ioapic->kvm))
                ioapic_lazy_update_eoi(ioapic, irq);

        /*
         * Return 0 for coalesced interrupts; for edge-triggered interrupts,
         * this only happens if a previous edge has not been delivered due
         * to masking.  For level interrupts, the remote_irr field tells
         * us if the interrupt is waiting for an EOI.
         *
         * RTC is special: it is edge-triggered, but userspace likes to know
         * if it has been already ack-ed via EOI because coalesced RTC
         * interrupts lead to time drift in Windows guests.  So we track
         * EOI manually for the RTC interrupt.
         */
        if (irq == RTC_GSI && line_status &&
                rtc_irq_check_coalesced(ioapic)) {
                ret = 0;
                goto out;
        }

        old_irr = ioapic->irr;
        ioapic->irr |= mask;
        if (edge) {
                ioapic->irr_delivered &= ~mask;
                if (old_irr == ioapic->irr) {
                        ret = 0;
                        goto out;
                }
        }

        ret = ioapic_service(ioapic, irq, line_status);

out:
        trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
        return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
        u32 idx;

        rtc_irq_eoi_tracking_reset(ioapic);
        for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
                ioapic_set_irq(ioapic, idx, 1, true);

        kvm_rtc_eoi_tracking_restore_all(ioapic);
}

void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;
        int index;

        spin_lock(&ioapic->lock);

        /* Make sure we see any missing RTC EOI */
        if (test_bit(vcpu->vcpu_id, dest_map->map))
                __set_bit(dest_map->vectors[vcpu->vcpu_id],
                          ioapic_handled_vectors);

        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
                    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
                    index == RTC_GSI) {
                        u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

                        kvm_scan_ioapic_irq(vcpu, e->fields.dest_id, dm,
                                            e->fields.vector, ioapic_handled_vectors);
                }
        }
        spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
        if (!ioapic_in_kernel(kvm))
                return;
        kvm_make_scan_ioapic_request(kvm);
}
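
/*
 * Handle a write through the indirect register window (IOWIN):
 * ioregsel selects the register being written.  Redirection table
 * entries are 64 bits wide and are accessed as two 32-bit halves,
 * with the low bit of ioregsel selecting the half.
 */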
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;
        int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                /* Writes are ignored. */
                break;

        case IOAPIC_REG_APIC_ID:
                ioapic->id = (val >> 24) & 0xf;
                break;

        case IOAPIC_REG_ARB_ID:
                break;

        default:
                index = (ioapic->ioregsel - 0x10) >> 1;

                if (index >= IOAPIC_NUM_PINS)
                        return;
                index = array_index_nospec(index, IOAPIC_NUM_PINS);
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
                /* Preserve read-only fields */
                old_remote_irr = e->fields.remote_irr;
                old_delivery_status = e->fields.delivery_status;
                old_dest_id = e->fields.dest_id;
                old_dest_mode = e->fields.dest_mode;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
                }
                e->fields.remote_irr = old_remote_irr;
                e->fields.delivery_status = old_delivery_status;

                /*
                 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
                 * be cleared by IOAPIC hardware when the entry is configured
                 * as edge-triggered. This behavior is used to simulate an
                 * explicit EOI on IOAPICs that don't have the EOI register.
                 */
                if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
                        e->fields.remote_irr = 0;

                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
                    ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
                        /*
                         * Pending status in irr may be outdated: the IRQ line may have
                         * already been deasserted by a device while the IRQ was masked.
                         * This occurs, for instance, if the interrupt is handled in a
                         * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
                         * case the guest acknowledges the interrupt to the device in
                         * its threaded irq handler, i.e. after the EOI but before
                         * unmasking, so at the time of unmasking the IRQ line is
                         * already down but our pending irr bit is still set. In such
                         * cases, injecting this pending interrupt to the guest is
                         * buggy: the guest will receive an extra unwanted interrupt.
                         *
                         * So we need to check here if the IRQ is actually still pending.
                         * As we are generally not able to probe the IRQ line status
                         * directly, we do it through irqfd resampler. Namely, we clear
                         * the pending status and notify the resampler that this interrupt
                         * is done, without actually injecting it into the guest. If the
                         * IRQ line is actually already deasserted, we are done. If it is
                         * still asserted, a new interrupt will be shortly triggered
                         * through irqfd and injected into the guest.
                         *
                         * If, however, it's not possible to resample (no irqfd resampler
                         * registered for this irq), then unconditionally inject this
                         * pending interrupt into the guest, so the guest will not miss
                         * an interrupt, although may get an extra unwanted interrupt.
                         */
                        if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
                                ioapic->irr &= ~(1 << index);
                        else
                                ioapic_service(ioapic, index, false);
                }
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;

                        irq.vector = e->fields.vector;
                        irq.delivery_mode = e->fields.delivery_mode << 8;
                        irq.dest_mode =
                            kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
                        irq.level = false;
                        irq.trig_mode = e->fields.trig_mode;
                        irq.shorthand = APIC_DEST_NOSHORT;
                        irq.dest_id = e->fields.dest_id;
                        irq.msi_redir_hint = false;
                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                 vcpu_bitmap);
                        if (old_dest_mode != e->fields.dest_mode ||
                            old_dest_id != e->fields.dest_id) {
                                /*
                                 * Update vcpu_bitmap with vcpus specified in
                                 * the previous request as well. This is done to
                                 * keep ioapic_handled_vectors synchronized.
                                 */
                                irq.dest_id = old_dest_id;
                                irq.dest_mode =
                                    kvm_lapic_irq_dest_mode(
                                        !!e->fields.dest_mode);
                                kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                         vcpu_bitmap);
                        }
                        kvm_make_scan_ioapic_request_mask(ioapic->kvm,
                                                          vcpu_bitmap);
                } else {
                        kvm_make_scan_ioapic_request(ioapic->kvm);
                }
                break;
        }
}
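
/*
 * Inject the interrupt pending on the given pin into the local APIC(s).
 * For a level-triggered interrupt, Remote IRR is set on successful
 * delivery and stays set until the guest EOIs the vector.
 */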
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
        struct kvm_lapic_irq irqe;
        int ret;

        if (entry->fields.mask ||
            (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
            entry->fields.remote_irr))
                return -1;

        irqe.dest_id = entry->fields.dest_id;
        irqe.vector = entry->fields.vector;
        irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
        irqe.trig_mode = entry->fields.trig_mode;
        irqe.delivery_mode = entry->fields.delivery_mode << 8;
        irqe.level = 1;
        irqe.shorthand = APIC_DEST_NOSHORT;
        irqe.msi_redir_hint = false;

        if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
                ioapic->irr_delivered |= 1 << irq;

        if (irq == RTC_GSI && line_status) {
                /*
                 * pending_eoi cannot ever become negative (see
                 * rtc_status_pending_eoi_check_valid) and the caller
                 * ensures that it is only called if it is >= zero, namely
                 * if rtc_irq_check_coalesced returns false.
                 */
                BUG_ON(ioapic->rtc_status.pending_eoi != 0);
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                               &ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
        } else
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

        if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
                entry->fields.remote_irr = 1;

        return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
                       int level, bool line_status)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

        spin_lock(&ioapic->lock);
        irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
                                         irq_source_id, level);
        ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

        spin_unlock(&ioapic->lock);

        return ret;
}
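
/*
 * Clear the given source's contribution to every pin's line state,
 * e.g. when the IRQ source ID is being freed.
 */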
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
        int i;

        spin_lock(&ioapic->lock);
        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &ioapic->irq_states[i]);
        spin_unlock(&ioapic->lock);
}

static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
        int i;
        struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
                                                 eoi_inject.work);
        spin_lock(&ioapic->lock);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
                        continue;

                if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
                        ioapic_service(ioapic, i, false);
        }
        spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
                                      struct kvm_ioapic *ioapic,
                                      int trigger_mode,
                                      int pin)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

        /*
         * We are dropping the lock while calling the ack notifiers because
         * ack notifier callbacks for assigned devices call into the IOAPIC
         * recursively.  Since remote_irr is cleared only after the call to
         * the notifiers, if the same vector is delivered while the lock is
         * dropped it will be put into irr and delivered after the ack
         * notifier returns.
         */
        spin_unlock(&ioapic->lock);
        kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
        spin_lock(&ioapic->lock);

        if (trigger_mode != IOAPIC_LEVEL_TRIG ||
            kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                return;

        ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
        ent->fields.remote_irr = 0;
        if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
                ++ioapic->irq_eoi[pin];
                if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
                        /*
                         * Real hardware does not deliver the interrupt
                         * immediately during eoi broadcast, and this
                         * lets a buggy guest make slow progress
                         * even if it does not correctly handle a
                         * level-triggered interrupt.  Emulate this
                         * behavior if we detect an interrupt storm.
                         */
                        schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
                        ioapic->irq_eoi[pin] = 0;
                        trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
                } else {
                        ioapic_service(ioapic, pin, false);
                }
        } else {
                ioapic->irq_eoi[pin] = 0;
        }
}
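
/*
 * Handle an EOI broadcast from the local APIC: ack every pin whose
 * redirection entry carries the EOI'd vector.
 */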
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
        int i;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        rtc_irq_eoi(ioapic, vcpu, vector);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.vector != vector)
                        continue;
                kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
        }
        spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
        return ((addr >= ioapic->base_address &&
                 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                            gpa_t addr, int len, void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 result;
        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf)); /* check alignment */

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                result = ioapic->ioregsel;
                break;

        case IOAPIC_REG_WINDOW:
                result = ioapic_read_indirect(ioapic);
                break;

        default:
                result = 0;
                break;
        }
        spin_unlock(&ioapic->lock);

        switch (len) {
        case 8:
                *(u64 *) val = result;
                break;
        case 1:
        case 2:
        case 4:
                memcpy(val, (char *)&result, len);
                break;
        default:
                printk(KERN_WARNING "ioapic: wrong length %d\n", len);
        }
        return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                             gpa_t addr, int len, const void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 data;
        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf)); /* check alignment */

        switch (len) {
        case 8:
        case 4:
                data = *(u32 *) val;
                break;
        case 2:
                data = *(u16 *) val;
                break;
        case 1:
                data = *(u8 *) val;
                break;
        default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;

        case IOAPIC_REG_WINDOW:
                ioapic_write_indirect(ioapic, data);
                break;

        default:
                break;
        }
        spin_unlock(&ioapic->lock);
        return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
        int i;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        for (i = 0; i < IOAPIC_NUM_PINS; i++)
                ioapic->redirtbl[i].fields.mask = 1;
        ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
        ioapic->ioregsel = 0;
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        ioapic->id = 0;
        memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
        rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
        .read  = ioapic_mmio_read,
        .write = ioapic_mmio_write,
};
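
/*
 * Create the in-kernel IOAPIC and register its MMIO range (starting at
 * IOAPIC_DEFAULT_BASE_ADDRESS) with the VM's MMIO bus.
 */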
int kvm_ioapic_init(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic;
        int ret;

        ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
        if (!ioapic)
                return -ENOMEM;
        spin_lock_init(&ioapic->lock);
        INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
        kvm->arch.vioapic = ioapic;
        kvm_ioapic_reset(ioapic);
        kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
        ioapic->kvm = kvm;
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
                                      IOAPIC_MEM_LENGTH, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0) {
                kvm->arch.vioapic = NULL;
                kfree(ioapic);
        }

        return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        if (!ioapic)
                return;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        kvm->arch.vioapic = NULL;
        kfree(ioapic);
}

void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
        /*
         * Edge-triggered interrupts that have already been delivered must
         * not show up as pending in the saved IRR, or they would be
         * injected a second time on restore.
         */
        state->irr &= ~ioapic->irr_delivered;
        spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        kvm_make_scan_ioapic_request(kvm);
        kvm_ioapic_inject_all(ioapic, state->irr);
        spin_unlock(&ioapic->lock);
}