/*
 * VFIO regions
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/vfio/vfio-region.h"
#include "hw/vfio/vfio-device.h"
#include "hw/hw.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "monitor/monitor.h"
#include "vfio-helpers.h"

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
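/*
 * MemoryRegionOps write handler: byte-swap the guest value to little
 * endian as needed and forward it to the region through the device's
 * region_write io_op.  Failures are reported but not propagated to the
 * guest.
 */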
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    int ret;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    ret = vbasedev->io_ops->region_write(vbasedev, region->nr,
                                         addr, size, &buf);
    if (ret != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %s",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size, strwriteerror(ret));
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

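/*
 * MemoryRegionOps read handler: fetch the value through the device's
 * region_read io_op and convert it from little endian to host order.
 * On failure the access returns all ones.
 */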
uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    int ret;

    ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf);
    if (ret != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %s",
                     __func__, vbasedev->name, region->nr,
                     addr, size, strreaderror(ret));
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

static const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

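/*
 * Parse the sparse mmap capability, if present, and record the non-empty
 * areas in region->mmaps.  Returns -ENODEV when the region info carries
 * no sparse mmap capability, in which case the caller falls back to
 * mapping the whole region.
 */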
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size - 1);
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

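/*
 * Query the region info from the kernel, initialise the VFIORegion fields
 * and the backing MemoryRegion, and prepare the list of mmap'able areas
 * (sparse areas if advertised, otherwise one area covering the whole
 * region).  The actual mappings are established later by vfio_region_mmap().
 */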
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info = NULL;
    int ret;

    ret = vfio_device_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

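/*
 * Tear down one mmap'ed area: remove its subregion from the container
 * MemoryRegion, unmap the host mapping and drop the MemoryRegion object.
 */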
static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

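/*
 * Map each recorded area of the region into the QEMU address space.  Each
 * mapping is aligned (see below), wrapped in a RAM device MemoryRegion and
 * added as a subregion of region->mem.  On failure all mappings created so
 * far are torn down and the negative errno is returned.
 */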
int vfio_region_mmap(VFIORegion *region)
{
    int i, ret, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        size_t align = MIN(1ULL << ctz64(region->mmaps[i].size), 1 * GiB);
        void *map_base, *map_align;

        /*
         * Align the mmap for more efficient mapping in the kernel.  Ideally
         * we'd know the PMD and PUD mapping sizes to use as discrete alignment
         * intervals, but we don't.  As of Linux v6.12, the largest PUD size
         * supporting huge pfnmap is 1GiB (ARCH_SUPPORTS_PUD_PFNMAP is only set
         * on x86_64).  Align by power-of-two size, capped at 1GiB.
         *
         * NB. qemu_memalign() and friends actually allocate memory, whereas
         * the region size here can exceed host memory, therefore we manually
         * create an oversized anonymous mapping and clean it up for alignment.
         */
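        /*
         * For example, a 16MiB area gives align = 16MiB: we reserve
         * size + align bytes of anonymous memory, round the start up to
         * the next 16MiB boundary, trim the unused head and tail, and
         * then place the real mapping at the aligned address with
         * MAP_FIXED below.
         */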
        map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map_base == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        map_align = (void *)ROUND_UP((uintptr_t)map_base, (uintptr_t)align);
        munmap(map_base, map_align - map_base);
        munmap(map_align + region->mmaps[i].size,
               align - (map_align - map_base));

        region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot,
                                     MAP_SHARED | MAP_FIXED,
                                     region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            ret = -errno;
            goto no_mmap;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;

no_mmap:
    trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                 region->fd_offset + region->mmaps[i].offset,
                                 region->fd_offset + region->mmaps[i].offset +
                                 region->mmaps[i].size - 1, ret);

    region->mmaps[i].mmap = NULL;

    for (i--; i >= 0; i--) {
        vfio_subregion_unmap(region, i);
    }

    return ret;
}

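/*
 * Unmap every currently mapped area of the region, removing the
 * corresponding subregions from region->mem.
 */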
void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

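/*
 * Detach the mmap'ed subregions from region->mem without unmapping them;
 * the host mappings themselves are released in vfio_region_finalize().
 */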
void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

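/*
 * Release all resources associated with the region: host mappings, the
 * per-mmap MemoryRegions, the container MemoryRegion and the mmaps array,
 * then reset the VFIORegion fields.
 */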
void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

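/*
 * Enable or disable every mapped subregion of the region, toggling between
 * direct mmap access and the slower read/write handlers above.
 */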
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}
401