#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/rbtree-interval.h"
#include "kvm/brlock.h"

#include <stdio.h>
#include <stdlib.h>

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/err.h>
#include <errno.h>

#define mmio_node(n) rb_entry(n, struct mmio_mapping, node)

/* A registered MMIO region and the callback that emulates accesses to it */
struct mmio_mapping {
	struct rb_int_node	node;
	void			(*mmio_fn)(struct kvm_cpu *vcpu, u64 addr, u8 *data, u32 len, u8 is_write, void *ptr);
	void			*ptr;
};

static struct rb_root mmio_tree = RB_ROOT;

/* Find a mapping that overlaps the guest-physical range [addr, addr + len) */
static struct mmio_mapping *mmio_search(struct rb_root *root, u64 addr, u64 len)
{
	struct rb_int_node *node;

	node = rb_int_search_range(root, addr, addr + len);
	if (node == NULL)
		return NULL;

	return mmio_node(node);
}

/* Find the lowest match and check for overlap */
static struct mmio_mapping *mmio_search_single(struct rb_root *root, u64 addr)
{
	struct rb_int_node *node;

	node = rb_int_search_single(root, addr);
	if (node == NULL)
		return NULL;

	return mmio_node(node);
}

static int mmio_insert(struct rb_root *root, struct mmio_mapping *data)
{
	return rb_int_insert(root, &data->node);
}

static const char *to_direction(u8 is_write)
{
	if (is_write)
		return "write";

	return "read";
}

int kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, bool coalesce,
		       void (*mmio_fn)(struct kvm_cpu *vcpu, u64 addr, u8 *data, u32 len, u8 is_write, void *ptr),
		       void *ptr)
{
	struct mmio_mapping *mmio;
	struct kvm_coalesced_mmio_zone zone;
	int ret;

	mmio = malloc(sizeof(*mmio));
	if (mmio == NULL)
		return -ENOMEM;

	*mmio = (struct mmio_mapping) {
		.node		= RB_INT_INIT(phys_addr, phys_addr + phys_addr_len),
		.mmio_fn	= mmio_fn,
		.ptr		= ptr,
	};

	if (coalesce) {
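		/*
		 * For a coalesced zone, KVM records guest writes in a
		 * ring buffer shared with userspace instead of exiting
		 * on every access; the pending entries are drained on a
		 * later exit.
		 */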
		zone = (struct kvm_coalesced_mmio_zone) {
			.addr	= phys_addr,
			.size	= phys_addr_len,
		};
		ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
		if (ret < 0) {
			free(mmio);
			return -errno;
		}
	}
	br_write_lock(kvm);
	ret = mmio_insert(&mmio_tree, mmio);
	br_write_unlock(kvm);

	return ret;
}

bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr)
{
	struct mmio_mapping *mmio;
	struct kvm_coalesced_mmio_zone zone;

	br_write_lock(kvm);
	mmio = mmio_search_single(&mmio_tree, phys_addr);
	if (mmio == NULL) {
		br_write_unlock(kvm);
		return false;
	}

	/* Harmless if the zone was never registered as coalesced */
	zone = (struct kvm_coalesced_mmio_zone) {
		.addr	= phys_addr,
		.size	= 1,
	};
	ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);

	rb_int_erase(&mmio_tree, &mmio->node);
	br_write_unlock(kvm);

	free(mmio);
	return true;
}

/* Dispatch a trapped MMIO access to the device that registered the zone */
bool kvm__emulate_mmio(struct kvm_cpu *vcpu, u64 phys_addr, u8 *data, u32 len, u8 is_write)
{
	struct mmio_mapping *mmio;

	br_read_lock();
	mmio = mmio_search(&mmio_tree, phys_addr, len);

	if (mmio)
		mmio->mmio_fn(vcpu, phys_addr, data, len, is_write, mmio->ptr);
	else {
		if (vcpu->kvm->cfg.mmio_debug)
			fprintf(stderr, "Warning: Ignoring MMIO %s at %016llx (length %u)\n",
				to_direction(is_write), phys_addr, len);
	}
	br_read_unlock();

	return true;
}
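
/*
 * Usage sketch (hypothetical device; the foo_* and FOO_* names are
 * illustrative only, not part of this API):
 *
 *	static void foo_mmio(struct kvm_cpu *vcpu, u64 addr, u8 *data,
 *			     u32 len, u8 is_write, void *ptr)
 *	{
 *		struct foo_dev *foo = ptr;
 *
 *		if (is_write)
 *			foo_reg_write(foo, addr - FOO_MMIO_BASE, data, len);
 *		else
 *			foo_reg_read(foo, addr - FOO_MMIO_BASE, data, len);
 *	}
 *
 *	kvm__register_mmio(kvm, FOO_MMIO_BASE, FOO_MMIO_SIZE, false,
 *			   foo_mmio, foo);
 *	...
 *	kvm__deregister_mmio(kvm, FOO_MMIO_BASE);
 */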