#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <signal.h>

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/types.h>

#include "kvm/epoll.h"
#include "kvm/ioeventfd.h"
#include "kvm/kvm.h"
#include "kvm/util.h"

#define IOEVENTFD_MAX_EVENTS	20

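/*
 * used_ioevents tracks every registered ioevent; ioeventfd_avail caches
 * whether the kernel advertises KVM_CAP_IOEVENTFD; epoll backs the
 * "ioeventfd-worker" thread that services userspace-polled events.
 */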
static LIST_HEAD(used_ioevents);
static bool ioeventfd_avail;
static struct kvm__epoll epoll;

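/*
 * Called by the epoll worker when an eventfd becomes readable: drain the
 * eventfd counter and dispatch the callback registered for this ioevent.
 */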
static void ioeventfd__handle_event(struct kvm *kvm, struct epoll_event *ev)
{
	u64 tmp;
	struct ioevent *ioevent = ev->data.ptr;

	if (read(ioevent->fd, &tmp, sizeof(tmp)) < 0)
		die("Failed reading event");

	ioevent->fn(ioevent->fn_kvm, ioevent->fn_ptr);
}

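/* Probe KVM_CAP_IOEVENTFD and, if present, start the epoll worker. */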
int ioeventfd__init(struct kvm *kvm)
{
	ioeventfd_avail = kvm__supports_extension(kvm, KVM_CAP_IOEVENTFD);
	if (!ioeventfd_avail)
		return 1; /* Not fatal, but let caller determine no-go. */

	return epoll__init(kvm, &epoll, "ioeventfd-worker",
			   ioeventfd__handle_event);
}
base_init(ioeventfd__init);

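/* Tear down the epoll worker; a no-op when ioeventfds are unavailable. */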
int ioeventfd__exit(struct kvm *kvm)
{
	if (!ioeventfd_avail)
		return 0;

	epoll__exit(&epoll);
	return 0;
}
base_exit(ioeventfd__exit);

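/*
 * Register an eventfd with KVM for the given I/O address and datamatch
 * value. With IOEVENTFD_FLAG_USER_POLL the fd is also added to the epoll
 * set so the worker thread can run the callback in userspace.
 */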
int ioeventfd__add_event(struct ioevent *ioevent, int flags)
{
	struct kvm_ioeventfd kvm_ioevent;
	struct epoll_event epoll_event;
	struct ioevent *new_ioevent;
	int event, r;

	if (!ioeventfd_avail)
		return -ENOSYS;

	new_ioevent = malloc(sizeof(*new_ioevent));
	if (new_ioevent == NULL)
		return -ENOMEM;

	*new_ioevent = *ioevent;
	event = new_ioevent->fd;

	kvm_ioevent = (struct kvm_ioeventfd) {
		.addr		= ioevent->io_addr,
		.len		= ioevent->io_len,
		.datamatch	= ioevent->datamatch,
		.fd		= event,
		.flags		= KVM_IOEVENTFD_FLAG_DATAMATCH,
	};

	/*
	 * For architectures that don't recognize PIO accesses, always register
	 * on the MMIO bus. Otherwise PIO accesses will cause returns to
	 * userspace.
	 */
	if (KVM_IOEVENTFD_HAS_PIO && flags & IOEVENTFD_FLAG_PIO)
		kvm_ioevent.flags |= KVM_IOEVENTFD_FLAG_PIO;

	r = ioctl(ioevent->fn_kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent);
	if (r) {
		r = -errno;
		goto cleanup;
	}

	if (flags & IOEVENTFD_FLAG_USER_POLL) {
		epoll_event = (struct epoll_event) {
			.events		= EPOLLIN,
			.data.ptr	= new_ioevent,
		};

		r = epoll_ctl(epoll.fd, EPOLL_CTL_ADD, event, &epoll_event);
		if (r) {
			r = -errno;
			goto cleanup;
		}
	}

	new_ioevent->flags = kvm_ioevent.flags;
	list_add_tail(&new_ioevent->list, &used_ioevents);

	return 0;

cleanup:
	free(new_ioevent);
	return r;
}

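/*
 * Deassign the ioeventfd matching addr/datamatch from KVM, drop it from the
 * epoll set and the used_ioevents list, then close and free it.
 */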
int ioeventfd__del_event(u64 addr, u64 datamatch)
{
	struct kvm_ioeventfd kvm_ioevent;
	struct ioevent *ioevent;
	u8 found = 0;

	if (!ioeventfd_avail)
		return -ENOSYS;

	list_for_each_entry(ioevent, &used_ioevents, list) {
		if (ioevent->io_addr == addr &&
		    ioevent->datamatch == datamatch) {
			found = 1;
			break;
		}
	}

	if (found == 0 || ioevent == NULL)
		return -ENOENT;

	kvm_ioevent = (struct kvm_ioeventfd) {
		.fd		= ioevent->fd,
		.addr		= ioevent->io_addr,
		.len		= ioevent->io_len,
		.datamatch	= ioevent->datamatch,
		.flags		= ioevent->flags
				| KVM_IOEVENTFD_FLAG_DEASSIGN,
	};

	ioctl(ioevent->fn_kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent);

	epoll_ctl(epoll.fd, EPOLL_CTL_DEL, ioevent->fd, NULL);

	list_del(&ioevent->list);

	close(ioevent->fd);
	free(ioevent);

	return 0;
}

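/*
 * Usage sketch (illustrative only, not compiled here): a device model would
 * typically create an eventfd (<sys/eventfd.h>), describe the trapped guest
 * address in a struct ioevent and register it. Names such as notify_addr,
 * notify_fn and dev are placeholders; the callback signature is assumed from
 * the ioevent->fn(ioevent->fn_kvm, ioevent->fn_ptr) call above.
 *
 *	struct ioevent ioevent = {
 *		.io_addr	= notify_addr,	// guest MMIO/PIO address to trap
 *		.io_len		= sizeof(u16),	// width of the trapped access
 *		.datamatch	= 0,		// value that must be written
 *		.fn		= notify_fn,	// void (*)(struct kvm *, void *)
 *		.fn_ptr		= dev,		// cookie handed back to notify_fn
 *		.fn_kvm		= kvm,
 *		.fd		= eventfd(0, 0),
 *	};
 *
 *	ioeventfd__add_event(&ioevent, IOEVENTFD_FLAG_USER_POLL);
 */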