/*
 * VFIO regions
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-region.h"
#include "hw/vfio/vfio-device.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "monitor/monitor.h"
#include "vfio-helpers.h"

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
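
/*
 * Slow-path write handler used by vfio_region_ops for guest accesses that
 * are not serviced by an mmap.  The value is stored little endian, as the
 * VFIO ABI expects, and handed to the device's region_write backend; a
 * short write is reported but does not abort the guest access.
 */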
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    int ret;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    ret = vbasedev->io_ops->region_write(vbasedev, region->nr,
                                         addr, size, &buf, region->post_wr);
    if (ret != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %s",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size, strwriteerror(ret));
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI. This will
     * do nothing if not pending (including not in INTx mode). We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt. Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

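/*
 * Slow-path read handler, the counterpart of vfio_region_write().  Data is
 * fetched through the device's region_read backend and converted from
 * little endian to host byte order; on a short read the error is reported
 * and all ones ((uint64_t)-1) is returned.
 */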
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    int ret;

    ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf);
    if (ret != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %s",
                     __func__, vbasedev->name, region->nr,
                     addr, size, strreaderror(ret));
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

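/*
 * MemoryRegionOps for the trapped (non-mmap) path of a VFIO region:
 * accesses of 1 to 8 bytes are accepted and passed through at their
 * natural size, and the region is little endian per the VFIO ABI.
 */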
static const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

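/*
 * Parse the VFIO_REGION_INFO_CAP_SPARSE_MMAP capability, when the kernel
 * advertises one, into region->mmaps[].  Zero-sized areas are skipped and
 * the array is trimmed to the number of usable entries.  Returns -ENODEV
 * if the capability is absent, in which case the caller falls back to a
 * single mmap covering the whole region.
 */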
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                                sparse->areas[i].offset +
                                                sparse->areas[i].size - 1);
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

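/*
 * Query the kernel for region @index and initialise @region: flags, size,
 * fd offset, the slow-path MemoryRegion (region->mem) and the candidate
 * mmap list.  No mapping is established here.  A rough sketch of the
 * expected caller sequence (names such as parent_mr are illustrative, not
 * mandated by this file):
 *
 *     vfio_region_setup(obj, vbasedev, region, nr, "bar0");
 *     memory_region_add_subregion(parent_mr, 0, region->mem);
 *     vfio_region_mmap(region);         // optional fast path
 *     ...
 *     vfio_region_exit(region);         // detach mmap subregions
 *     vfio_region_finalize(region);     // release mappings and memory
 */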
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info = NULL;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;
    region->post_wr = false;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

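/*
 * Tear down a single mmap'd area: unlink its MemoryRegion from region->mem,
 * unmap the host mapping and drop the RAM device object.
 */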
static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

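/*
 * Map each candidate area of the region into the QEMU address space and
 * overlay it on region->mem so that guest accesses bypass the trapped
 * read/write path.  On failure, mappings established so far are torn down
 * and a negative errno is returned; the region remains usable through the
 * slow path.
 */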
int vfio_region_mmap(VFIORegion *region)
{
    int i, ret, prot = 0;
    char *name;
    int fd;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        size_t align = MIN(1ULL << ctz64(region->mmaps[i].size), 1 * GiB);
        void *map_base, *map_align;

        /*
         * Align the mmap for more efficient mapping in the kernel. Ideally
         * we'd know the PMD and PUD mapping sizes to use as discrete alignment
         * intervals, but we don't. As of Linux v6.12, the largest PUD size
         * supporting huge pfnmap is 1GiB (ARCH_SUPPORTS_PUD_PFNMAP is only set
         * on x86_64). Align by power-of-two size, capped at 1GiB.
         *
         * NB. qemu_memalign() and friends actually allocate memory, whereas
         * the region size here can exceed host memory, therefore we manually
         * create an oversized anonymous mapping and clean it up for alignment.
         */
        map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map_base == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        /* Use the per-region fd if set, or the shared fd. */
        fd = region->vbasedev->region_fds ?
             region->vbasedev->region_fds[region->nr] :
             region->vbasedev->fd;

        map_align = (void *)ROUND_UP((uintptr_t)map_base, (uintptr_t)align);
        munmap(map_base, map_align - map_base);
        munmap(map_align + region->mmaps[i].size,
               align - (map_align - map_base));

        region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot,
                                     MAP_SHARED | MAP_FIXED, fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;

no_mmap:
    trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                 region->fd_offset + region->mmaps[i].offset,
                                 region->fd_offset + region->mmaps[i].offset +
                                 region->mmaps[i].size - 1, ret);

    region->mmaps[i].mmap = NULL;

    for (i--; i >= 0; i--) {
        vfio_subregion_unmap(region, i);
    }

    return ret;
}

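/*
 * Undo vfio_region_mmap(): remove and unmap every active mmap area.  The
 * slow-path MemoryRegion and the mmaps[] bookkeeping stay in place, so the
 * region can be mapped again later.
 */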
void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

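/*
 * Detach any mmap'd subregions from region->mem ahead of freeing the
 * region.  Unlike vfio_region_unmap(), the host mappings themselves are
 * left intact here; they are released by vfio_region_finalize().
 */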
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

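/*
 * Release everything still held by the region: the host mappings, the
 * per-mmap RAM device MemoryRegions, the slow-path MemoryRegion and the
 * mmaps[] array, then reset the structure to its initial state.
 */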
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

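/*
 * Enable or disable all established mmap subregions at once.  Disabling
 * them routes guest accesses back through the trapped read/write path
 * without tearing down the host mappings.
 */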
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}