#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/kvm.h>

#include "kvm/kvm.h"
#include "kvm/irq.h"
#include "kvm/kvm-arch.h"
10
/* Next legacy IRQ line to hand out; starts at the arch-specific offset. */
static u8 next_line = KVM_IRQ_OFFSET;
/* Number of routing-entry slots currently allocated in irq_routing. */
static int allocated_gsis = 0;

/* Next GSI number to assign to an MSI route. */
int next_gsi;

struct msi_routing_ops irq__default_routing_ops;
/* Active MSI routing backend; defaults to the KVM in-kernel implementation. */
struct msi_routing_ops *msi_routing_ops = &irq__default_routing_ops;

/* Lazily (re)allocated GSI routing table pushed to KVM; NULL until first use. */
struct kvm_irq_routing *irq_routing = NULL;
20
irq__alloc_line(void)21 int irq__alloc_line(void)
22 {
23 return next_line++;
24 }
25
irq__get_nr_allocated_lines(void)26 int irq__get_nr_allocated_lines(void)
27 {
28 return next_line - KVM_IRQ_OFFSET;
29 }
30
irq__allocate_routing_entry(void)31 int irq__allocate_routing_entry(void)
32 {
33 size_t table_size = sizeof(struct kvm_irq_routing);
34 size_t old_size = table_size;
35 int nr_entries = 0;
36
37 if (irq_routing)
38 nr_entries = irq_routing->nr;
39
40 if (nr_entries < allocated_gsis)
41 return 0;
42
43 old_size += sizeof(struct kvm_irq_routing_entry) * allocated_gsis;
44 allocated_gsis = ALIGN(nr_entries + 1, 32);
45 table_size += sizeof(struct kvm_irq_routing_entry) * allocated_gsis;
46 irq_routing = realloc(irq_routing, table_size);
47
48 if (irq_routing == NULL)
49 return -ENOMEM;
50 memset((void *)irq_routing + old_size, 0, table_size - old_size);
51
52 irq_routing->nr = nr_entries;
53 irq_routing->flags = 0;
54
55 return 0;
56 }
57
check_for_irq_routing(struct kvm * kvm)58 static bool check_for_irq_routing(struct kvm *kvm)
59 {
60 static int has_irq_routing = 0;
61
62 if (has_irq_routing == 0) {
63 if (kvm__supports_extension(kvm, KVM_CAP_IRQ_ROUTING))
64 has_irq_routing = 1;
65 else
66 has_irq_routing = -1;
67 }
68
69 return has_irq_routing > 0;
70 }
71
irq__update_msix_routes(struct kvm * kvm,struct kvm_irq_routing_entry * entry)72 static int irq__update_msix_routes(struct kvm *kvm,
73 struct kvm_irq_routing_entry *entry)
74 {
75 return ioctl(kvm->vm_fd, KVM_SET_GSI_ROUTING, irq_routing);
76 }
77
irq__default_can_signal_msi(struct kvm * kvm)78 static bool irq__default_can_signal_msi(struct kvm *kvm)
79 {
80 return kvm__supports_extension(kvm, KVM_CAP_SIGNAL_MSI);
81 }
82
irq__default_signal_msi(struct kvm * kvm,struct kvm_msi * msi)83 static int irq__default_signal_msi(struct kvm *kvm, struct kvm_msi *msi)
84 {
85 return ioctl(kvm->vm_fd, KVM_SIGNAL_MSI, msi);
86 }
87
88 struct msi_routing_ops irq__default_routing_ops = {
89 .update_route = irq__update_msix_routes,
90 .signal_msi = irq__default_signal_msi,
91 .can_signal_msi = irq__default_can_signal_msi,
92 };
93
irq__can_signal_msi(struct kvm * kvm)94 bool irq__can_signal_msi(struct kvm *kvm)
95 {
96 return msi_routing_ops->can_signal_msi(kvm);
97 }
98
irq__signal_msi(struct kvm * kvm,struct kvm_msi * msi)99 int irq__signal_msi(struct kvm *kvm, struct kvm_msi *msi)
100 {
101 return msi_routing_ops->signal_msi(kvm, msi);
102 }
103
/*
 * Register a new MSI route for @msg, assigning it the next free GSI.
 *
 * Returns the allocated GSI number on success, -ENXIO if the kernel
 * lacks IRQ routing support, or a negative error from the table grow /
 * route update.
 */
int irq__add_msix_route(struct kvm *kvm, struct msi_msg *msg, u32 device_id)
{
	struct kvm_irq_routing_entry *route;
	int ret;

	if (!check_for_irq_routing(kvm))
		return -ENXIO;

	ret = irq__allocate_routing_entry();
	if (ret)
		return ret;

	/* Fill the first unused slot; unnamed fields are zeroed. */
	route = &irq_routing->entries[irq_routing->nr];
	*route = (struct kvm_irq_routing_entry) {
		.gsi			= next_gsi,
		.type			= KVM_IRQ_ROUTING_MSI,
		.u.msi.address_hi	= msg->address_hi,
		.u.msi.address_lo	= msg->address_lo,
		.u.msi.data		= msg->data,
	};

	/* Some archs (e.g. GICv3 ITS) need the device ID in the route. */
	if (kvm->msix_needs_devid) {
		route->flags = KVM_MSI_VALID_DEVID;
		route->u.msi.devid = device_id;
	}

	irq_routing->nr++;

	ret = msi_routing_ops->update_route(kvm, route);
	if (ret)
		return ret;

	return next_gsi++;
}
138
update_data(u32 * ptr,u32 newdata)139 static bool update_data(u32 *ptr, u32 newdata)
140 {
141 if (*ptr == newdata)
142 return false;
143
144 *ptr = newdata;
145 return true;
146 }
147
/*
 * Update the MSI route registered for @gsi with a new message.
 * Silently ignores unknown GSIs; only pushes the table to the backend
 * when at least one field actually changed. Dies on update failure.
 */
void irq__update_msix_route(struct kvm *kvm, u32 gsi, struct msi_msg *msg)
{
	struct kvm_irq_routing_entry *route = NULL;
	struct kvm_irq_routing_msi *msi;
	unsigned int i;
	bool dirty;

	for (i = 0; i < irq_routing->nr; i++) {
		if (irq_routing->entries[i].gsi == gsi) {
			route = &irq_routing->entries[i];
			break;
		}
	}
	if (!route)
		return;

	msi = &route->u.msi;
	dirty  = update_data(&msi->address_hi, msg->address_hi);
	dirty |= update_data(&msi->address_lo, msg->address_lo);
	dirty |= update_data(&msi->data, msg->data);

	if (!dirty)
		return;

	if (msi_routing_ops->update_route(kvm, route))
		die_perror("KVM_SET_GSI_ROUTING");
}
172
irq__common_add_irqfd(struct kvm * kvm,unsigned int gsi,int trigger_fd,int resample_fd)173 int irq__common_add_irqfd(struct kvm *kvm, unsigned int gsi, int trigger_fd,
174 int resample_fd)
175 {
176 struct kvm_irqfd irqfd = {
177 .fd = trigger_fd,
178 .gsi = gsi,
179 .flags = resample_fd > 0 ? KVM_IRQFD_FLAG_RESAMPLE : 0,
180 .resamplefd = resample_fd,
181 };
182
183 /* If we emulate MSI routing, translate the MSI to the corresponding IRQ */
184 if (msi_routing_ops->translate_gsi)
185 irqfd.gsi = msi_routing_ops->translate_gsi(kvm, gsi);
186
187 return ioctl(kvm->vm_fd, KVM_IRQFD, &irqfd);
188 }
189
irq__common_del_irqfd(struct kvm * kvm,unsigned int gsi,int trigger_fd)190 void irq__common_del_irqfd(struct kvm *kvm, unsigned int gsi, int trigger_fd)
191 {
192 struct kvm_irqfd irqfd = {
193 .fd = trigger_fd,
194 .gsi = gsi,
195 .flags = KVM_IRQFD_FLAG_DEASSIGN,
196 };
197
198 if (msi_routing_ops->translate_gsi)
199 irqfd.gsi = msi_routing_ops->translate_gsi(kvm, gsi);
200
201 ioctl(kvm->vm_fd, KVM_IRQFD, &irqfd);
202 }
203
irq__exit(struct kvm * kvm)204 int __attribute__((weak)) irq__exit(struct kvm *kvm)
205 {
206 free(irq_routing);
207 return 0;
208 }
209 dev_base_exit(irq__exit);
210