xref: /kvmtool/mmio.c (revision 73f7e5b39938fbb3e2c900701f07e730853432cf)
129443dabSPekka Enberg #include "kvm/kvm.h"
26b1994caSSasha Levin #include "kvm/rbtree-interval.h"
3f588adbbSSasha Levin #include "kvm/brlock.h"
429443dabSPekka Enberg 
529443dabSPekka Enberg #include <stdio.h>
66b1994caSSasha Levin #include <stdlib.h>
76b1994caSSasha Levin 
8*73f7e5b3SSasha Levin #include <sys/ioctl.h>
9*73f7e5b3SSasha Levin #include <linux/kvm.h>
103fdf659dSSasha Levin #include <linux/types.h>
116b1994caSSasha Levin #include <linux/rbtree.h>
126b1994caSSasha Levin 
/* Recover the enclosing mmio_mapping from its embedded rb_int_node. */
#define mmio_node(n) rb_entry(n, struct mmio_mapping, node)

/* One registered MMIO region and the callback that emulates accesses to it. */
struct mmio_mapping {
	struct rb_int_node	node;	/* interval [phys_addr, phys_addr + len] in guest physical space */
	void			(*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write);
};

/*
 * All registered MMIO regions, indexed by guest physical address interval.
 * Readers (the MMIO exit fast path) take the br read lock, writers the
 * br write lock.
 */
static struct rb_root mmio_tree = RB_ROOT;
216b1994caSSasha Levin 
226b1994caSSasha Levin static struct mmio_mapping *mmio_search(struct rb_root *root, u64 addr, u64 len)
236b1994caSSasha Levin {
246b1994caSSasha Levin 	struct rb_int_node *node;
256b1994caSSasha Levin 
266b1994caSSasha Levin 	node = rb_int_search_range(root, addr, addr + len);
276b1994caSSasha Levin 	if (node == NULL)
286b1994caSSasha Levin 		return NULL;
296b1994caSSasha Levin 
306b1994caSSasha Levin 	return mmio_node(node);
316b1994caSSasha Levin }
326b1994caSSasha Levin 
336b1994caSSasha Levin /* Find lowest match, Check for overlap */
346b1994caSSasha Levin static struct mmio_mapping *mmio_search_single(struct rb_root *root, u64 addr)
356b1994caSSasha Levin {
366b1994caSSasha Levin 	struct rb_int_node *node;
376b1994caSSasha Levin 
386b1994caSSasha Levin 	node = rb_int_search_single(root, addr);
396b1994caSSasha Levin 	if (node == NULL)
406b1994caSSasha Levin 		return NULL;
416b1994caSSasha Levin 
426b1994caSSasha Levin 	return mmio_node(node);
436b1994caSSasha Levin }
446b1994caSSasha Levin 
/*
 * Insert @data's interval node into @root.  The return value is passed
 * through from rb_int_insert(); kvm__register_mmio() treats it as a bool
 * (non-zero == success) — assumed convention, confirm in rbtree-interval.c.
 */
static int mmio_insert(struct rb_root *root, struct mmio_mapping *data)
{
	return rb_int_insert(root, &data->node);
}
4929443dabSPekka Enberg 
503fdf659dSSasha Levin static const char *to_direction(u8 is_write)
5129443dabSPekka Enberg {
5229443dabSPekka Enberg 	if (is_write)
5329443dabSPekka Enberg 		return "write";
5429443dabSPekka Enberg 
5529443dabSPekka Enberg 	return "read";
5629443dabSPekka Enberg }
5729443dabSPekka Enberg 
58*73f7e5b3SSasha Levin bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
596b1994caSSasha Levin {
606b1994caSSasha Levin 	struct mmio_mapping *mmio;
61*73f7e5b3SSasha Levin 	struct kvm_coalesced_mmio_zone zone;
62f588adbbSSasha Levin 	int ret;
636b1994caSSasha Levin 
646b1994caSSasha Levin 	mmio = malloc(sizeof(*mmio));
656b1994caSSasha Levin 	if (mmio == NULL)
666b1994caSSasha Levin 		return false;
676b1994caSSasha Levin 
686b1994caSSasha Levin 	*mmio = (struct mmio_mapping) {
696b1994caSSasha Levin 		.node = RB_INT_INIT(phys_addr, phys_addr + phys_addr_len),
706b1994caSSasha Levin 		.kvm_mmio_callback_fn = kvm_mmio_callback_fn,
716b1994caSSasha Levin 	};
726b1994caSSasha Levin 
73*73f7e5b3SSasha Levin 	zone = (struct kvm_coalesced_mmio_zone) {
74*73f7e5b3SSasha Levin 		.addr	= phys_addr,
75*73f7e5b3SSasha Levin 		.size	= phys_addr_len,
76*73f7e5b3SSasha Levin 	};
77*73f7e5b3SSasha Levin 	ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
78*73f7e5b3SSasha Levin 	if (ret < 0) {
79*73f7e5b3SSasha Levin 		free(mmio);
80*73f7e5b3SSasha Levin 		return false;
81*73f7e5b3SSasha Levin 	}
82*73f7e5b3SSasha Levin 
83f588adbbSSasha Levin 	br_write_lock();
84f588adbbSSasha Levin 	ret = mmio_insert(&mmio_tree, mmio);
85f588adbbSSasha Levin 	br_write_unlock();
86f588adbbSSasha Levin 
87f588adbbSSasha Levin 	return ret;
886b1994caSSasha Levin }
896b1994caSSasha Levin 
90*73f7e5b3SSasha Levin bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr)
916b1994caSSasha Levin {
926b1994caSSasha Levin 	struct mmio_mapping *mmio;
93*73f7e5b3SSasha Levin 	struct kvm_coalesced_mmio_zone zone;
946b1994caSSasha Levin 
95f588adbbSSasha Levin 	br_write_lock();
966b1994caSSasha Levin 	mmio = mmio_search_single(&mmio_tree, phys_addr);
97f588adbbSSasha Levin 	if (mmio == NULL) {
98f588adbbSSasha Levin 		br_write_unlock();
996b1994caSSasha Levin 		return false;
100f588adbbSSasha Levin 	}
1016b1994caSSasha Levin 
102*73f7e5b3SSasha Levin 	zone = (struct kvm_coalesced_mmio_zone) {
103*73f7e5b3SSasha Levin 		.addr	= phys_addr,
104*73f7e5b3SSasha Levin 		.size	= 1,
105*73f7e5b3SSasha Levin 	};
106*73f7e5b3SSasha Levin 	ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
107*73f7e5b3SSasha Levin 
1086b1994caSSasha Levin 	rb_int_erase(&mmio_tree, &mmio->node);
109f588adbbSSasha Levin 	br_write_unlock();
110f588adbbSSasha Levin 
1116b1994caSSasha Levin 	free(mmio);
1126b1994caSSasha Levin 	return true;
1136b1994caSSasha Levin }
1146b1994caSSasha Levin 
11543835ac9SSasha Levin bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
11629443dabSPekka Enberg {
117f588adbbSSasha Levin 	struct mmio_mapping *mmio;
118f588adbbSSasha Levin 
119f588adbbSSasha Levin 	br_read_lock();
120f588adbbSSasha Levin 	mmio = mmio_search(&mmio_tree, phys_addr, len);
1216b1994caSSasha Levin 
1226b1994caSSasha Levin 	if (mmio)
1236b1994caSSasha Levin 		mmio->kvm_mmio_callback_fn(phys_addr, data, len, is_write);
1246b1994caSSasha Levin 	else
1253fdf659dSSasha Levin 		fprintf(stderr, "Warning: Ignoring MMIO %s at %016llx (length %u)\n",
12629443dabSPekka Enberg 			to_direction(is_write), phys_addr, len);
127f588adbbSSasha Levin 	br_read_unlock();
12829443dabSPekka Enberg 
12929443dabSPekka Enberg 	return true;
13029443dabSPekka Enberg }
131